
Experiment 3: Handwritten Digit Recognition [Machine Learning]

Posted: 2022-11-16


Recommended

Implementing handwritten digit recognition in Python (beginner tutorial)

Original article: MNIST Handwritten Digit Recognition in PyTorch; translated version: Implementing MNIST handwritten digit recognition with PyTorch (very detailed)
mnist.gz / mnist.csv dataset download

MNIST dataset: download, training and testing with PyTorch (also see this if downloading PyTorch and torchvision directly is too slow or fails)
Downloading PyTorch and torchvision from the Tsinghua mirror

Step 4 ("Create a virtual environment") of "Setting up the environment: Miniconda + PyCharm [Machine Learning]"
Common Anaconda commands: updating, viewing and adding download sources, etc.
and Step 5 ("Use the virtual environment"), Extension 7: installing the required packages in the virtual environment

Note

Import problem with sklearn.externals.joblib: change "from sklearn.externals import joblib" to "import joblib".
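In newer scikit-learn versions, joblib is no longer bundled under sklearn.externals and is installed and imported as a standalone package. A minimal before/after sketch (model here stands for any fitted estimator):

# old, removed in recent scikit-learn releases:
# from sklearn.externals import joblib

# new, after "pip install joblib":
import joblib

# joblib.dump(model, 'model.pkl')   # save a fitted estimator
# model = joblib.load('model.pkl')  # load it back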

Code: handwritten digit recognition in Python (beginner tutorial)

Project structure


1.py

Requires the joblib package.

import numpy as np
from sklearn.linear_model import LogisticRegression
import os
import joblib

# Data preprocessing
trainData = np.loadtxt(open('digits_training.csv', 'r'), delimiter=",", skiprows=1)  # load the data
MTrain, NTrain = np.shape(trainData)  # number of rows and columns
print("Training set:", MTrain, NTrain)
xTrain = trainData[:, 1:NTrain]
xTrain_col_avg = np.mean(xTrain, axis=0)  # per-column mean
xTrain = (xTrain - xTrain_col_avg) / 255  # normalize
yTrain = trainData[:, 0]

'''================================='''
# Train the model
model = LogisticRegression(solver='lbfgs', multi_class='multinomial', max_iter=500)
model.fit(xTrain, yTrain)
print("Training finished")

'''================================='''
# Test the model
testData = np.loadtxt(open('digits_testing.csv', 'r'), delimiter=",", skiprows=1)
MTest, NTest = np.shape(testData)
print("Test set:", MTest, NTest)
xTest = testData[:, 1:NTest]
xTest = (xTest - xTrain_col_avg) / 255  # normalize with the training set's column means
yTest = testData[:, 0]
yPredict = model.predict(xTest)
errors = np.count_nonzero(yTest - yPredict)  # number of nonzero differences = misclassified samples
print("Prediction finished. Errors:", errors)
print("Test accuracy:", (MTest - errors) / MTest)

'''================================='''
# Save the model

# create the output directory
dirs = 'testModel'
if not os.path.exists(dirs):
    os.makedirs(dirs)
joblib.dump(model, dirs + '/model.pkl')
print("Model saved")

Results 1


2.py

Requires the cv2 package (opencv-python).



Place a few digit images under the test directory.


import cv2
import numpy as np
import joblib

map = cv2.imread(r"test/img1.png")
GrayImage = cv2.cvtColor(map, cv2.COLOR_BGR2GRAY)
# get the width and height of the image
width, height = map.shape[:2][::-1]
# inverse binary threshold; as in the original tutorial, the image width is used as the
# threshold value and the height as the max value
ret, thresh2 = cv2.threshold(GrayImage, width, height, cv2.THRESH_BINARY_INV)
Image = cv2.resize(thresh2, (28, 28))
img_array = np.asarray(Image)
z = img_array.reshape(1, -1)  # flatten to a single 784-dimensional row

'''================================================'''

model = joblib.load('testModel' + '/model.pkl')
yPredict = model.predict(z)
print(yPredict)
y = str(yPredict)
cv2.putText(map, y, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2, cv2.LINE_AA)
cv2.imshow("map", map)
cv2.waitKey(0)
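Note that 1.py normalized its inputs by subtracting the training set's column means and dividing by 255, while this script feeds the raw thresholded pixels straight to the model. If the predictions look unreliable, one option is to also save the column means in 1.py and apply the same preprocessing here. A minimal sketch under that assumption (the col_avg.npy file is hypothetical and would have to be written by 1.py, e.g. with np.save('testModel/col_avg.npy', xTrain_col_avg)):

col_avg = np.load('testModel/col_avg.npy')  # hypothetical file produced by 1.py
z_norm = (z - col_avg) / 255                # same preprocessing as the training data
print(model.predict(z_norm))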

Results 2






Code: implementing MNIST handwritten digit recognition with PyTorch (very detailed)

pytorch.py

import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10
random_seed = 1
torch.manual_seed(random_seed)

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_test, shuffle=True)

examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
# print(example_targets)
# print(example_data.shape)

fig = plt.figure()
for i in range(6):
plt.subplot(2, 3, i + 1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
plt.show()


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]


def train(epoch):
    network.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = network(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * len(train_loader.dataset)))
            torch.save(network.state_dict(), './model.pth')
            torch.save(optimizer.state_dict(), './optimizer.pth')


def test():
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


train(1)

test()  # without this call, the plot below fails with: x and y must be the same size
for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()

fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')

examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
with torch.no_grad():
    output = network(example_data)
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Prediction: {}".format(output.data.max(1, keepdim=True)[1][i].item()))
    plt.xticks([])
    plt.yticks([])
plt.show()

# ----------------------------------------------------------- #

continued_network = Net()
continued_optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

network_state_dict = torch.load('model.pth')
continued_network.load_state_dict(network_state_dict)
optimizer_state_dict = torch.load('optimizer.pth')
continued_optimizer.load_state_dict(optimizer_state_dict)

# Do not comment out the "for epoch in range(1, n_epochs + 1):" loop above,
# otherwise the plot fails with: x and y must be the same size
# The range starts at 4 because n_epochs = 3 and the loop above already covered [1, n_epochs + 1)
for i in range(4, 9):
    test_counter.append(i * len(train_loader.dataset))
    train(i)
    test()

fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
plt.show()
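Once training has finished, the saved model.pth can be reloaded later for inference without retraining. A minimal sketch, assuming the Net class and test_loader defined above (the variable names here are only illustrative):

inference_net = Net()
inference_net.load_state_dict(torch.load('./model.pth'))
inference_net.eval()  # put dropout layers into evaluation mode

with torch.no_grad():
    images, labels = next(iter(test_loader))
    preds = inference_net(images).data.max(1, keepdim=True)[1]
    print("predicted:", preds[0].item(), "ground truth:", labels[0].item())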

Run results


Project structure after running


Code: my own version

import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# import matplotlib.pyplot as plt


num_epochs = 6
batch_size = 100
learning_rate = 0.1
momentum = 0.5
log_interval = 10
random_seed = 1
torch.manual_seed(random_seed)
input_size=28*28
num_classes=10

train_dataset = torchvision.datasets.MNIST('./data/', train=True, download=True,
                                           transform=torchvision.transforms.Compose([
                                               torchvision.transforms.ToTensor(),
                                               torchvision.transforms.Normalize(
                                                   (0.1307,), (0.3081,))
                                           ]))
test_dataset = torchvision.datasets.MNIST('./data/', train=False, download=True,
                                          transform=torchvision.transforms.Compose([
                                              torchvision.transforms.ToTensor(),
                                              torchvision.transforms.Normalize(
                                                  (0.1307,), (0.3081,))
                                          ]))

train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size[0])
        self.fc2 = nn.Linear(hidden_size[0], hidden_size[1])
        self.fc3 = nn.Linear(hidden_size[1], num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out


model = NeuralNet(input_size, [256, 64], num_classes)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.reshape(-1, 28 * 28)
        outputs = model(images)
        # pass num_classes explicitly so every batch is one-hot encoded to 10 columns
        # (CrossEntropyLoss would also accept the integer labels directly)
        labels_onehot = F.one_hot(labels, num_classes=num_classes)
        loss = criterion(outputs, labels_onehot.float())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 600 == 0:
            print("Epoch :{} \t Loss:{:.6f}".format(epoch, loss.item()))

torch.save(model,'model_total.ckpt')
# torch.save(net.state_dict(),'model_para.ckpt')

#-------------------------------------------------------------------

model = torch.load('model_total.ckpt')
'''
net = NeuralNet(input_size, [256, 64], num_classes)  # hidden sizes must match the saved model
net.load_state_dict(torch.load('model_para.ckpt'))
'''

def acc(labels, outputs):
    _, predicted = torch.max(outputs.data, 1)
    num = len(labels)
    right = (predicted == labels).sum().item()
    return num, right


with torch.no_grad():
    correct, total = 0, 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28 * 28)
        outputs = model(images)
        num, right = acc(labels, outputs)
        correct = correct + right
        total = total + num
print('Accuracy of the network on the 10000 test images:{}%'.format(100 * correct / total))

Results

Epoch :0   Loss:2.294961
Epoch :1 Loss:0.086749
Epoch :2 Loss:0.101823
Epoch :3 Loss:0.045709
Epoch :4 Loss:0.053201
Epoch :5 Loss:0.032638
Accuracy of the network on the 10000 test images:98.0%


From: https://blog.51cto.com/u_15719556/5855678
