Code from: https://weread.qq.com/web/reader/33f32c90813ab71c6g018fffkd3d322001ad3d9446802347 (Deep Learning with Python)
from tensorflow.keras.datasets import mnist
from tensorflow.keras import optimizers
import tensorflow as tf
import numpy as np
import math
class NaiveDense:
    """A naive dense layer: output = activation(dot(inputs, W) + b)."""
    def __init__(self, input_size, output_size, activation):
        self.activation = activation
        w_shape = (input_size, output_size)
        w_initial_value = tf.random.uniform(w_shape, minval=0, maxval=1e-1)
        self.W = tf.Variable(w_initial_value)
        b_shape = (output_size,)
        b_initial_value = tf.zeros(b_shape)
        self.b = tf.Variable(b_initial_value)

    def __call__(self, inputs):
        return self.activation(tf.matmul(inputs, self.W) + self.b)

    @property
    def weights(self):
        return [self.W, self.b]
class NaiveSequential:
    """Chains a list of layers; calling the model applies them in order."""
    def __init__(self, layers):
        self.layers = layers

    def __call__(self, inputs):
        x = inputs
        for layer in self.layers:
            x = layer(x)
        return x

    @property
    def weights(self):
        weights = []
        for layer in self.layers:
            weights += layer.weights
        return weights
class BatchGenerator:
    """Yields successive (images, labels) batches of size batch_size."""
    def __init__(self, images, labels, batch_size=128):
        assert len(images) == len(labels)
        self.index = 0
        self.images = images
        self.labels = labels
        self.batch_size = batch_size
        self.num_batches = math.ceil(len(images) / batch_size)

    def next(self):
        images = self.images[self.index : self.index + self.batch_size]
        labels = self.labels[self.index : self.index + self.batch_size]
        self.index += self.batch_size
        return images, labels
def one_training_step(model, images_batch, labels_batch):
    # Forward pass: record the computation on a GradientTape
    with tf.GradientTape() as tape:
        predictions = model(images_batch)
        per_sample_losses = tf.keras.losses.sparse_categorical_crossentropy(
            labels_batch, predictions)
        average_loss = tf.reduce_mean(per_sample_losses)
    # Backward pass: gradients of the loss w.r.t. every weight
    gradients = tape.gradient(average_loss, model.weights)
    update_weights(gradients, model.weights)
    return average_loss
def update_weights(gradients, weights):
    # Manual SGD step: w -= learning_rate * g
    for g, w in zip(gradients, weights):
        w.assign_sub(g * learning_rate)

# In practice you would rarely update weights by hand; the definition
# below replaces the manual one above with a Keras optimizer.
def update_weights(gradients, weights):
    optimizer.apply_gradients(zip(gradients, weights))
def fit(model, images, labels, epochs, batch_size=128):
    for epoch_counter in range(epochs):
        print(f"Epoch {epoch_counter}")
        # Pass batch_size through (the original ignored this argument)
        batch_generator = BatchGenerator(images, labels, batch_size)
        for batch_counter in range(batch_generator.num_batches):
            images_batch, labels_batch = batch_generator.next()
            loss = one_training_step(model, images_batch, labels_batch)
            if batch_counter % 100 == 0:
                print(f"loss at batch {batch_counter}: {loss:.2f}")
model = NaiveSequential([
    NaiveDense(input_size=28 * 28, output_size=512, activation=tf.nn.relu),
    NaiveDense(input_size=512, output_size=10, activation=tf.nn.softmax)
])
assert len(model.weights) == 4

learning_rate = 1e-3
optimizer = optimizers.SGD(learning_rate=1e-3)
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype("float32") / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype("float32") / 255
fit(model, train_images, train_labels, epochs=10, batch_size=128)
predictions = model(test_images)
predictions = predictions.numpy()
predicted_labels = np.argmax(predictions, axis=1)
matches = predicted_labels == test_labels
print(f"accuracy: {matches.mean():.2f}")
Output:
Epoch 0
loss at batch 0: 3.28
loss at batch 100: 2.21
loss at batch 200: 2.15
loss at batch 300: 2.06
loss at batch 400: 2.15
Epoch 1
loss at batch 0: 1.87
loss at batch 100: 1.86
loss at batch 200: 1.78
loss at batch 300: 1.68
loss at batch 400: 1.76
Epoch 2
loss at batch 0: 1.55
loss at batch 100: 1.56
loss at batch 200: 1.47
loss at batch 300: 1.41
loss at batch 400: 1.46
Epoch 3
loss at batch 0: 1.29
loss at batch 100: 1.32
loss at batch 200: 1.21
loss at batch 300: 1.20
loss at batch 400: 1.24
Epoch 4
loss at batch 0: 1.11
loss at batch 100: 1.15
loss at batch 200: 1.02
loss at batch 300: 1.04
loss at batch 400: 1.08
Epoch 5
loss at batch 0: 0.97
loss at batch 100: 1.01
loss at batch 200: 0.88
loss at batch 300: 0.92
loss at batch 400: 0.96
Epoch 6
loss at batch 0: 0.86
loss at batch 100: 0.90
loss at batch 200: 0.78
loss at batch 300: 0.84
loss at batch 400: 0.88
Epoch 7
loss at batch 0: 0.78
loss at batch 100: 0.82
loss at batch 200: 0.71
loss at batch 300: 0.77
loss at batch 400: 0.81
Epoch 8
loss at batch 0: 0.72
loss at batch 100: 0.75
loss at batch 200: 0.65
loss at batch 300: 0.71
loss at batch 400: 0.76
Epoch 9
loss at batch 0: 0.67
loss at batch 100: 0.70
loss at batch 200: 0.60
loss at batch 300: 0.67
loss at batch 400: 0.72
accuracy: 0.82
This code implements a simple neural network for handwritten digit recognition, built mainly with the TensorFlow library.
The main parts of the code and what they do:
1. Network layer: the NaiveDense class defines a fully connected layer with weights W, a bias b, and an activation function.
2. Model: the NaiveSequential class defines a model composed of multiple NaiveDense layers.
3. Batch generator: the BatchGenerator class produces the training batches.
4. Training step: the one_training_step function performs one step of training: forward pass, loss computation, backpropagation, and weight update.
5. Training loop: the fit function runs the full training process, looping over epochs and over the training steps within each epoch.
6. Model and optimizer: a NaiveSequential model is built from two NaiveDense layers, together with an SGD optimizer.
7. Data loading and preprocessing: the MNIST handwritten digit dataset is loaded and preprocessed.
8. Training: the model is trained with the fit function.
9. Evaluation: the model is evaluated on the test data and its accuracy is computed.
The point of this code is to show how to implement a simple neural network from scratch with TensorFlow and use it for handwritten digit recognition; a sketch of the equivalent high-level Keras version follows below.
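For reference, here is a minimal sketch of the same two-layer architecture written with the standard Keras API (this sketch is an addition, not part of the book excerpt above; keras_model is an illustrative name):

from tensorflow import keras
from tensorflow.keras import layers

# The same architecture, using the high-level Keras API.
keras_model = keras.Sequential([
    layers.Dense(512, activation="relu"),
    layers.Dense(10, activation="softmax"),
])
keras_model.compile(optimizer=keras.optimizers.SGD(learning_rate=1e-3),
                    loss="sparse_categorical_crossentropy",
                    metrics=["accuracy"])
keras_model.fit(train_images, train_labels, epochs=10, batch_size=128)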
[What TensorFlow adds]
TensorFlow looks a lot like NumPy. What NumPy cannot do, however, is retrieve the gradient of any differentiable expression with respect to any of its inputs. You just open a GradientTape scope, apply some computation to one or several input tensors, and then retrieve the gradient of the result with respect to the inputs:
import tensorflow as tf

time = tf.Variable(0.)
with tf.GradientTape() as outer_tape:
    with tf.GradientTape() as inner_tape:
        position = 4.9 * time ** 2               # free fall: x(t) = 4.9 * t^2
    speed = inner_tape.gradient(position, time)  # dx/dt = 9.8 * t
acceleration = outer_tape.gradient(speed, time)  # d^2x/dt^2 = 9.8
print(acceleration)
print(speed)
Output:
tf.Tensor(9.8, shape=(), dtype=float32)
tf.Tensor(0.0, shape=(), dtype=float32)
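A supplementary sketch (an addition, assuming TensorFlow 2.x): by default the tape only tracks trainable tf.Variable objects, so to differentiate with respect to a constant tensor you must watch it explicitly.

import tensorflow as tf

x = tf.constant(3.)
with tf.GradientTape() as tape:
    tape.watch(x)   # constants are not tracked unless watched
    y = x ** 2      # y = x^2, so dy/dx = 2x
grad = tape.gradient(y, x)
print(grad)         # tf.Tensor(6.0, shape=(), dtype=float32)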
Additional notes:
Momentum is a variant of gradient descent for optimizing neural networks. Its key idea is to introduce a momentum term so that each parameter update takes into account not only the current gradient but also the direction of previous gradients, which speeds up learning and improves stability.
In standard gradient descent, the update rule is:
w = w - learning_rate * gradient
where w is the parameter, learning_rate is the learning rate, and gradient is the gradient.
With momentum, the update rule becomes:
v = momentum * v - learning_rate * gradient
w = w + v
where v is the momentum (velocity) term and momentum is the momentum factor, typically 0.9 or another value close to 1.
Each update of the velocity v combines the previous velocity with the current gradient, which adds a kind of "inertia" to the parameter updates: the parameters move smoothly along the accumulated gradient direction rather than following only the current gradient at every step.
Intuitively, momentum is like skiing downhill: even across a flat stretch or a slight uphill, inertia keeps you moving, so you reach the bottom faster. A small runnable sketch follows below.
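A minimal NumPy sketch of the update rule above (an illustration; the function name momentum_step and the toy loss are made up here):

import numpy as np

def momentum_step(w, v, gradient, learning_rate=1e-3, momentum=0.9):
    # v accumulates an exponentially decaying sum of past gradients
    v = momentum * v - learning_rate * gradient
    w = w + v
    return w, v

# Usage: keep one velocity array per parameter, initialized to zeros.
w = np.array([1.0, -2.0])
v = np.zeros_like(w)
for _ in range(100):
    gradient = 2 * w    # gradient of the toy loss sum(w ** 2)
    w, v = momentum_step(w, v, gradient)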
The relu function:
def naive_relu(x):
    assert len(x.shape) == 2    # x is a rank-2 NumPy array
    x = x.copy()                # avoid mutating the input
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            x[i, j] = max(x[i, j], 0)
    return x
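For comparison, a vectorized sketch of the same operation (an addition; in practice you would call np.maximum or tf.nn.relu directly):

import numpy as np

def vectorized_relu(x):
    # Element-wise max(x, 0) in a single vectorized call
    return np.maximum(x, 0.)

x = np.random.randn(2, 3)
assert np.array_equal(vectorized_relu(x), naive_relu(x))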