2D convolutional layer:
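The corr2d function below implements 2D cross-correlation, the core computation of a convolutional layer: a kernel of shape $(h, w)$ slides over an $n_h \times n_w$ input, each output element is the sum of the elementwise product between the kernel and the input window it currently covers, and the output therefore has shape $(n_h - h + 1, n_w - w + 1)$:

$$Y_{i,j} = \sum_{a=0}^{h-1} \sum_{b=0}^{w-1} X_{i+a,\,j+b}\, K_{a,b}.$$

With the 3×3 input and 2×2 kernel used in the first example, the top-left output element is $0 \times 0 + 1 \times 1 + 3 \times 2 + 4 \times 3 = 19$, and the full printed result is [[19, 25], [37, 43]].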
```python
from mxnet import autograd, nd
from mxnet.gluon import nn


# corr2d implements the 2D cross-correlation operation
def corr2d(x, k):
    # height and width of the kernel
    h, w = k.shape
    # the output shrinks to (x.shape[0] - h + 1, x.shape[1] - w + 1)
    y = nd.zeros((x.shape[0] - h + 1, x.shape[1] - w + 1))
    # slide the kernel over the input; each output element is the sum of
    # the elementwise product of the kernel and the window it covers
    for i in range(y.shape[0]):
        for j in range(y.shape[1]):
            y[i, j] = (x[i:i + h, j:j + w] * k).sum()
    return y


# define an input x and a kernel k, then compute the cross-correlation
x = nd.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])  # input
k = nd.array([[0, 1], [2, 3]])                   # kernel
print(corr2d(x, k))


# a 2D convolutional layer as a custom Gluon Block
# (defined for illustration; the training example below uses the
# built-in nn.Conv2D instead)
class Conv2D(nn.Block):
    def __init__(self, kernel_size, **kwargs):
        super(Conv2D, self).__init__(**kwargs)
        # the layer's weight (kernel) and bias parameters
        self.weight = self.params.get('weight', shape=kernel_size)
        self.bias = self.params.get('bias', shape=(1,))

    def forward(self, x):
        # cross-correlate the input with the kernel, then add the bias
        return corr2d(x, self.weight.data()) + self.bias.data()


# edge detection: a 6x8 input with a black stripe in the middle
x = nd.ones((6, 8))
x[:, 2:6] = 0
print(x)
k = nd.array([[1, -1]])  # this kernel detects vertical edges
print(k)
y = corr2d(x, k)
print(y)  # +1 marks the white-to-black edge, -1 the black-to-white edge


# learning the kernel from data:
# a convolutional layer with 1 output channel and a (1, 2) kernel
conv2d = nn.Conv2D(1, kernel_size=(1, 2))
conv2d.initialize()

# the Gluon convolution layer expects 4-D arrays in the
# (batch, channel, height, width) format
x = x.reshape((1, 1, 6, 8))
y = y.reshape((1, 1, 6, 7))

# train for 10 iterations
for i in range(10):
    with autograd.record():
        y_hat = conv2d(x)
        loss = (y_hat - y) ** 2  # squared error between prediction and target
    loss.backward()  # compute the gradient of the loss w.r.t. the kernel
    # update the kernel with a fixed learning rate of 3e-2
    conv2d.weight.data()[:] -= 3e-2 * conv2d.weight.grad()
    if (i + 1) % 2 == 0:
        print('batch %d, loss %.3f' % (i + 1, loss.sum().asscalar()))

# print the learned kernel
print(conv2d.weight.data().reshape((1, 2)))
```
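A quick check, not in the original listing, of why this kernel works: k = [[1, -1]] responds only to vertical edges. Transposing the striped input turns its vertical edges into horizontal ones, and the output becomes all zeros. A minimal sketch, assuming the corr2d function defined above is in scope:

```python
from mxnet import nd

# rebuild the striped input from the edge-detection example
x = nd.ones((6, 8))
x[:, 2:6] = 0
k = nd.array([[1, -1]])

# x.T has horizontal edges only; the (1, 2) kernel cannot detect them,
# so every output element is 0
print(corr2d(x.T, k))
```

Consistent with this, the training loop should drive the learned kernel close to the target [[1, -1]] that generated y, which is what the final print statement shows after ten updates.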