The digits here range from 0 to 9, ten digits in total, each represented by a 5×3 matrix; treated as an image, every digit is a 5×3 picture. Taking the digit 0 below as an example, "1" marks a lit (colored) pixel and "0" marks an unlit one.
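For example, the digit 0 (the first row of the inputSample tensor in the code below) rendered as a 5×3 grid is:

    1 1 1
    1 0 1
    1 0 1
    1 0 1
    1 1 1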
The network is a classic BP (back-propagation) neural network with 15 inputs and 10 outputs. When the input is the shape of "0", the value at output index 0 is the largest and close to 1; when the input is the shape of "1", the value at output index 1 is the largest and close to 1, and so on, which is how the digit is recognized. I chose the network's parameters casually, with no tuning at all, and it can still be trained and used. The test environment is VS2017 with libtorch 1.13.1. The header file is as follows:
#include <torch/torch.h>

/* One fully connected layer followed by a Sigmoid activation */
class LinearSigImpl : public torch::nn::Module {
public:
    LinearSigImpl(int input, int output);
    torch::Tensor forward(torch::Tensor x);
private:
    torch::nn::Linear ln;
    torch::nn::Sigmoid sn;
};
TORCH_MODULE(LinearSig);

/* The whole network: three hidden LinearSig layers plus an output layer */
class Mlp : public torch::nn::Module {
public:
    Mlp(int input, int outputCount);
    torch::Tensor forward(torch::Tensor x);
private:
    LinearSig ln1;
    LinearSig ln2;
    LinearSig ln3;
    LinearSig output;
};
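Once the implementation in the .cpp file below is in place, a quick sanity check (a minimal sketch, not part of the original code) is to feed a random 1×15 tensor through the network and confirm that a 1×10 output comes back:

    Mlp net(15, 10);
    torch::Tensor out = net.forward(torch::rand({ 1, 15 }));
    std::cout << out.sizes() << std::endl;   /* expected: [1, 10] */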
The .cpp file is as follows. A different loss function is used here, SmoothL1Loss; libtorch offers many loss functions and optimization methods to choose from, and any suitable one will work (see the sketch after the code). At the end of the code I added a hand-typed 5×3 digit and print its recognition result; in testing, digits with a small amount of noise are still recognized correctly:
#include <torch/torch.h>
#include <iostream>
#include <vector>
/* also include the header above that declares LinearSig and Mlp */

using namespace std;

LinearSigImpl::LinearSigImpl(int input, int output) : ln(nullptr), sn(nullptr) {
    ln = register_module("ln", torch::nn::Linear(input, output));
    sn = register_module("sn", torch::nn::Sigmoid());
}

torch::Tensor LinearSigImpl::forward(torch::Tensor x) {
    x = ln->forward(x);
    x = sn->forward(x);
    return x;
}

Mlp::Mlp(int input, int outputCount) : ln1(nullptr), ln2(nullptr), ln3(nullptr), output(nullptr) {
    const int layer[] = { 30, 30, 20 };   /* hidden layer sizes: 15 -> 30 -> 30 -> 20 -> 10 */
    ln1 = register_module("ln1", LinearSig(input, layer[0]));
    ln2 = register_module("ln2", LinearSig(layer[0], layer[1]));
    ln3 = register_module("ln3", LinearSig(layer[1], layer[2]));
    output = register_module("output", LinearSig(layer[2], outputCount));
}

torch::Tensor Mlp::forward(torch::Tensor x) {
    x = ln1->forward(x);
    x = ln2->forward(x);
    x = ln3->forward(x);
    x = output->forward(x);
    return x;
}

/* Training inputs: each row is one digit flattened to 15 values (5 rows x 3 columns) */
static torch::Tensor inputSample = torch::tensor({
    { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f }, /* 0 */
    { 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f },
    { 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f },
    { 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f },
    { 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f }, /* 4 */
    { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f },
    { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f },
    { 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f },
    { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f },
    { 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f }, /* 9 */
});

/* Training targets: one-hot rows, with a 1 at the index of the digit */
static torch::Tensor outputSample = torch::tensor({
    { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f },
    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f },
});

int main() {
    Mlp machine(15, 10);

    /* Training */
    torch::optim::SGD optim(machine.parameters(), torch::optim::SGDOptions(0.2));
    torch::nn::SmoothL1Loss lossFunc;
    machine.train();
    for (int i = 0; i < 500000; i++) {
        torch::Tensor predict = machine.forward(inputSample);
        torch::Tensor loss = lossFunc(predict, outputSample);
        optim.zero_grad();
        loss.backward();
        optim.step();
        if (i % 5000 == 0) {
            /* print the loss value every 5000 iterations */
            cout << "LOOP:" << i << ",LOSS=" << loss.item() << endl;
        }
    }

    /* A quick test with slightly noisy digits */
    at::Tensor x = torch::tensor({
        { 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.1f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f }, /* 1 */
        { 1.0f, 0.9f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f }, /* 0 */
        { 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.9f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f }, /* 2 */
    });
    at::Tensor y = machine.forward(x);
    std::tuple<torch::Tensor, torch::Tensor> maxValue = y.max(1);
    for (int i = 0; i < 3; i++) {
        cout << "Shape " << i << " predicted as " << std::get<1>(maxValue)[i].item().toInt() << endl;
    }

    /* Hand-typed 5x3 digit */
    vector<float> inputs;
    cout << "Enter a 5*3 digit [each value between 0 and 1]:" << endl;
    for (int i = 0; i < 15; i++) {
        float number;
        cin >> number;
        inputs.push_back(number);
    }
    x = torch::from_blob(inputs.data(), { 1, 15 }, c10::TensorOptions(c10::ScalarType::Float));
    y = machine.forward(x);
    maxValue = y.max(1);
    cout << "Predicted output = " << std::get<1>(maxValue)[0].item().toInt() << endl;
    return 0;
}
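As mentioned above, other losses and optimizers can be swapped in with minimal changes. Below is a sketch (not from the original code) that replaces SmoothL1Loss/SGD with MSELoss/Adam inside the same training loop; the learning rate 0.01 and the 100000 iterations are illustrative values, not tuned ones.

    torch::nn::MSELoss mseLoss;
    torch::optim::Adam adam(machine.parameters(), torch::optim::AdamOptions(0.01));
    for (int i = 0; i < 100000; i++) {
        torch::Tensor predict = machine.forward(inputSample);
        torch::Tensor loss = mseLoss(predict, outputSample);   /* mean squared error against the one-hot targets */
        adam.zero_grad();
        loss.backward();
        adam.step();
    }

In practice an adaptive optimizer like Adam usually needs far fewer iterations than plain SGD on a tiny problem like this, so the loop count can be reduced accordingly.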
Running the original program above gives the console output below; the hand-typed digit at the end approximates the pattern for 7 and is recognized correctly:
...
LOOP:475000,LOSS=0.000606246
LOOP:480000,LOSS=0.000572149
LOOP:485000,LOSS=0.000541376
LOOP:490000,LOSS=0.000513536
LOOP:495000,LOSS=0.000488265
Shape 0 predicted as 1
Shape 1 predicted as 0
Shape 2 predicted as 2
Enter a 5*3 digit [each value between 0 and 1]:
1 0.9 1 0.1 0 0.98 0 0.1 0.97 0.3 0 0.92 0 0 1
Predicted output = 7
Tags: Tensor, libtorch, torch, forward, seven-segment digit, recognition
From: https://www.cnblogs.com/mengxiangdu/p/18025672