In [1]:
import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf from sklearn.preprocessing import StandardScalerIn [2]:
# Download CIFAR-10: 50,000 32x32 RGB training images (10 classes)
# plus 10,000 test images; show the training-set shape as cell output.
(x_train_all, y_train_all), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train_all.shape
(50000, 32, 32, 3)In [3]:
# Hold out the first 5,000 training images for validation.
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]

# Fit the standardizer on training pixels only, then apply the same
# mean/std to validation and test (no leakage). Pixels are flattened to
# a single column so one global mean/std is used, then reshaped back.
scaler = StandardScaler()

def _standardize(images, fit=False):
    """Standardize pixel values and restore the (N, 32, 32, 3) shape."""
    flat = images.astype(np.float32).reshape(-1, 1)
    scaled = scaler.fit_transform(flat) if fit else scaler.transform(flat)
    return scaled.reshape(-1, 32, 32, 3)

x_train_scaled = _standardize(x_train, fit=True)
x_valid_scaled = _standardize(x_valid)
x_test_scaled = _standardize(x_test)
def make_dataset(data, target, repeat, batch_size, shuffle=True,
                 shuffle_buffer=10000, prefetch_buffer=50):
    """Build a batched tf.data input pipeline over (data, target) pairs.

    Args:
        data: feature array; first axis indexes examples.
        target: label array aligned with ``data``.
        repeat: number of passes over the data (``None`` repeats forever).
        batch_size: number of examples per batch.
        shuffle: whether to shuffle examples (disable for evaluation).
        shuffle_buffer: shuffle-buffer size; only used when ``shuffle``.
        prefetch_buffer: number of batches to prefetch for pipelining.

    Returns:
        A ``tf.data.Dataset`` yielding (data, target) batches.
    """
    dataset = tf.data.Dataset.from_tensor_slices((data, target))
    if shuffle:
        dataset = dataset.shuffle(shuffle_buffer)
    return dataset.repeat(repeat).batch(batch_size).prefetch(prefetch_buffer)

batch_size = 64
# Training set repeats 10 times (one repeat per training epoch below);
# the eval set is a single, unshuffled pass.
train_dataset = make_dataset(x_train_scaled, y_train,
                             repeat=10, batch_size=batch_size)
eval_dataset = make_dataset(x_valid_scaled, y_valid,
                            repeat=1, batch_size=32, shuffle=False)
model = tf.keras.models.Sequential()

# Convolutional feature extractor: three conv-conv-pool stages with
# doubling filter counts (32 -> 64 -> 128). input_shape is
# (height, width, channels) and is given on the first layer only.
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='same',
                                 activation='relu', input_shape=(32, 32, 3)))
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, padding='same',
                                 activation='relu'))
model.add(tf.keras.layers.MaxPool2D())  # 32x32 -> 16x16

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
                                 activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding='same',
                                 activation='relu'))
model.add(tf.keras.layers.MaxPool2D())  # 16x16 -> 8x8

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',
                                 activation='relu'))
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=3, padding='same',
                                 activation='relu'))
model.add(tf.keras.layers.MaxPool2D())  # 8x8 -> 4x4

# Flatten keeps the batch dimension and collapses the rest:
# (None, 4, 4, 128) -> (None, 2048).  (The original comment claimed 3072,
# i.e. 32*32*3, but that is the raw-image size, not the flattened
# feature-map size; the model summary confirms 2048.)
model.add(tf.keras.layers.Flatten())

# Classifier head. AlphaDropout is paired with selu because it preserves
# the self-normalizing property that plain Dropout would break.
# Note: the ignored (and incorrect) input_shape=(3072,) on the first Dense
# layer has been removed — Keras only honors input_shape on the first layer.
model.add(tf.keras.layers.Dense(1536, activation='selu'))
model.add(tf.keras.layers.AlphaDropout(0.25))
model.add(tf.keras.layers.Dense(768, activation='selu'))
model.add(tf.keras.layers.AlphaDropout(0.2))
model.add(tf.keras.layers.Dense(384, activation='selu'))
model.add(tf.keras.layers.Dense(128, activation='selu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
# Train. steps_per_epoch makes one Keras "epoch" equal one full pass over
# the 45,000 scaled training images; the dataset's repeat=10 supplies
# enough batches for the 10 epochs requested here.
steps_per_epoch = x_train_scaled.shape[0] // batch_size
display(steps_per_epoch)

history = model.fit(
    train_dataset,
    steps_per_epoch=steps_per_epoch,
    epochs=10,
    validation_data=eval_dataset,
)
703
Epoch 1/10 703/703 [==============================] - 64s 90ms/step - loss: 1.6196 - acc: 0.4142 - val_loss: 1.6997 - val_acc: 0.5114 Epoch 2/10 703/703 [==============================] - 67s 96ms/step - loss: 1.0461 - acc: 0.6294 - val_loss: 1.0882 - val_acc: 0.6660 Epoch 3/10 703/703 [==============================] - 68s 96ms/step - loss: 0.8426 - acc: 0.7060 - val_loss: 0.9333 - val_acc: 0.7176 Epoch 4/10 703/703 [==============================] - 68s 97ms/step - loss: 0.6994 - acc: 0.7598 - val_loss: 0.9219 - val_acc: 0.7424 Epoch 5/10 703/703 [==============================] - 69s 98ms/step - loss: 0.6004 - acc: 0.7944 - val_loss: 1.1453 - val_acc: 0.7390 Epoch 6/10 703/703 [==============================] - 67s 95ms/step - loss: 0.5187 - acc: 0.8203 - val_loss: 0.9907 - val_acc: 0.7634 Epoch 7/10 703/703 [==============================] - 65s 93ms/step - loss: 0.4417 - acc: 0.8495 - val_loss: 1.0884 - val_acc: 0.7578 Epoch 8/10 703/703 [==============================] - 65s 93ms/step - loss: 0.3789 - acc: 0.8716 - val_loss: 0.9646 - val_acc: 0.7854 Epoch 9/10 703/703 [==============================] - 65s 93ms/step - loss: 0.3718 - acc: 0.8750 - val_loss: 1.3729 - val_acc: 0.7606 Epoch 10/10 703/703 [==============================] - 65s 93ms/step - loss: 0.2818 - acc: 0.9052 - val_loss: 1.4302 - val_acc: 0.7692In [7]:
# Per-epoch training metrics as a DataFrame (rendered as the table below).
history_df = pd.DataFrame(history.history)
history_df
loss | acc | val_loss | val_acc | |
---|---|---|---|---|
0 | 1.619645 | 0.414229 | 1.699681 | 0.5114 |
1 | 1.046071 | 0.629423 | 1.088171 | 0.6660 |
2 | 0.842616 | 0.705992 | 0.933270 | 0.7176 |
3 | 0.699434 | 0.759846 | 0.921863 | 0.7424 |
4 | 0.600412 | 0.794408 | 1.145335 | 0.7390 |
5 | 0.518747 | 0.820257 | 0.990689 | 0.7634 |
6 | 0.441742 | 0.849462 | 1.088383 | 0.7578 |
7 | 0.378852 | 0.871622 | 0.964595 | 0.7854 |
8 | 0.371824 | 0.874978 | 1.372856 | 0.7606 |
9 | 0.281802 | 0.905205 | 1.430226 | 0.7692 |
# Learning curves. Training accuracy keeps climbing while validation loss
# rises after roughly epoch 4 — a sign of overfitting.
ax = pd.DataFrame(history.history).plot(figsize=(8, 5))
ax.grid(True)
ax.set_ylim(0, 2)
plt.show()
# Final held-out evaluation on the standardized test set: [loss, accuracy].
model.evaluate(x_test_scaled, y_test)
313/313 [==============================] - 5s 16ms/step - loss: 1.4817 - acc: 0.7670Out[9]:
[1.4817293882369995, 0.7670000195503235]In [10]:
# Layer-by-layer architecture and parameter counts (~5.0M trainable params).
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 32, 32, 32) 896 conv2d_1 (Conv2D) (None, 32, 32, 32) 9248 max_pooling2d (MaxPooling2 (None, 16, 16, 32) 0 D) conv2d_2 (Conv2D) (None, 16, 16, 64) 18496 conv2d_3 (Conv2D) (None, 16, 16, 64) 36928 max_pooling2d_1 (MaxPoolin (None, 8, 8, 64) 0 g2D) conv2d_4 (Conv2D) (None, 8, 8, 128) 73856 conv2d_5 (Conv2D) (None, 8, 8, 128) 147584 max_pooling2d_2 (MaxPoolin (None, 4, 4, 128) 0 g2D) flatten (Flatten) (None, 2048) 0 dense (Dense) (None, 1536) 3147264 alpha_dropout (AlphaDropou (None, 1536) 0 t) dense_1 (Dense) (None, 768) 1180416 alpha_dropout_1 (AlphaDrop (None, 768) 0 out) dense_2 (Dense) (None, 384) 295296 dense_3 (Dense) (None, 128) 49280 dense_4 (Dense) (None, 10) 1290 ================================================================= Total params: 4960554 (18.92 MB) Trainable params: 4960554 (18.92 MB) Non-trainable params: 0 (0.00 Byte) _________________________________________________________________标签:acc,loss,CiFar10,卷积,32,703,tf,TensorFlow,model From: https://www.cnblogs.com/funsion/p/18680463