(一)选题背景:
苹果树是优良的经济作物,目前我国苹果树的种植面积较大,产量较高,而且苹果的品种也在不断改良和更新。苹果的种植条件比较宽泛,大部分栽植于北方地区,种植面积大,市场需求量也大,其中陕西洛川、宜川,甘肃天水,新疆阿克苏等地盛产苹果,这几个地方产的苹果品质优良。农场采摘了许多苹果和香蕉,但由于机器人没有辨别程序,加之天气炎热,苹果和香蕉被混放在一起,容易腐烂变质,造成农民经济收入降低。为了提高效益,机器人需要能够判断水果是否腐烂,于是本设计通过计算机视觉和机器学习实现了一套用于判断水果是否腐烂的程序。
(二)机器学习设计案例:从网站中下载相关的数据集,对数据集进行整理,在python的环境中,给数据集中的文件打上标签,对数据进行预处理,利用keras--gpu和tensorflow,通过构建输入层,隐藏层,输出层建立训练模型,导入图片测试模型。
参考来源:机器学习-周志华.关于水果如何分类
数据集来源:kaggle,网址:https://www.kaggle.com/
(三)机器学习的实现步骤:
1.下载数据集
2.导入需要用到的库
# Step 2: import the required libraries (duplicate numpy/os imports removed).
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import shutil
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from tensorflow.keras.models import Sequential, Model

# Step 3: configure dataset paths and collect the class labels.
TRAIN_PATH = './chenhuangyu/data/dataset/train/'  # directory with the training images
TEST_PATH = './chenhuangyu/data/dataset/test/'    # directory with the test images
SIZE = (240, 240)  # every image is resized to 240x240

# Each sub-directory of TRAIN_PATH holds one class; its name is the label.
labels = [class_ for class_ in os.listdir(TRAIN_PATH)]
NUM_LABELS = len(labels)
print(labels)
4.得到每个文件中图片的数量并进行图像预处理
from keras.preprocessing.image import ImageDataGenerator

# Rescale pixel values to [0, 1]; hold out 10% of the training data for validation.
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.1)
test_datagen = ImageDataGenerator(rescale=1./255)

train_dataset = datagen.flow_from_directory(batch_size=32,
                                            directory=TRAIN_PATH,
                                            shuffle=True,
                                            classes=labels,
                                            target_size=SIZE,
                                            subset="training",
                                            class_mode='categorical')
val_dataset = datagen.flow_from_directory(batch_size=32,
                                          directory=TRAIN_PATH,
                                          shuffle=True,
                                          classes=labels,
                                          target_size=SIZE,
                                          subset="validation",
                                          class_mode='categorical')
# NOTE: the original wrapped the notes below in fullwidth quotes (‘’‘ … ’‘’),
# which is a SyntaxError in Python; replaced with an ASCII triple-quoted string.
'''
-- The image data generator resizes every image to target_size.
-- x_col is the column holding the image name.
-- y_col is the column holding the label.
-- has_ext means the image name includes the file extension, e.g. image_name.jpg.
-- Change target_size here to resize all images to a different shape.
'''
5.导入训练所需要的图片,查看经过处理的图片以及它的标签
# Show a 3x4 grid of sample training images with their class labels.
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(15, 12))
idx = 0
for i in range(3):
    for j in range(4):
        # train_dataset[0] is the first batch: [0] = images, [1] = one-hot labels.
        label = labels[np.argmax(train_dataset[0][1][idx])]
        ax[i, j].set_title(f"{label}")
        ax[i, j].imshow(train_dataset[0][0][idx][:, :, :])
        ax[i, j].axis("off")
        idx += 1
# plt.tight_layout()
plt.suptitle("Sample Training Images", fontsize=21)
plt.show()
8.构建神经网络并对模型进行训练
# Build the CNN: three Conv/Pool/Dropout blocks followed by a dense classifier.
model = tf.keras.Sequential()
# Conv block 1: 32 filters.
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                                 input_shape=(240, 240, 3)))
model.add(tf.keras.layers.MaxPool2D((2, 2)))
model.add(tf.keras.layers.Dropout(0.2))
# Conv block 2: 64 filters.
model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D((2, 2)))
model.add(tf.keras.layers.Dropout(0.2))
# Conv block 3: 128 filters.
model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D((2, 2)))
model.add(tf.keras.layers.Dropout(0.2))
# Flatten the feature maps and classify.
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
# One output per class, derived from the dataset instead of hard-coding 6,
# so the model stays correct if the number of class folders changes.
model.add(tf.keras.layers.Dense(NUM_LABELS, activation='softmax'))
# Compile with categorical cross-entropy to match class_mode='categorical'.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
9.总结模型
model.summary()
# Train for 12 epochs. Model.fit accepts generators directly; fit_generator
# is deprecated and removed in recent TensorFlow releases.
history = model.fit(train_dataset,
                    steps_per_epoch=len(train_dataset),
                    epochs=12,
                    validation_data=val_dataset,
                    validation_steps=len(val_dataset))
# Save the trained model (the original comment promised this but had no code).
model.save('fruit_model.h5')
10.绘制损失曲线和精度曲线图
import matplotlib.pyplot as plt

# Accuracy curves (training vs. validation).
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(['Acc', 'Val'], loc='lower right')
plt.show()

# Loss curves — start a new figure so they do not draw over the accuracy axes.
# (The original jammed these statements onto one line and reused the same plot.)
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['loss', 'Val'], loc='upper right')
plt.show()
# 10. Test the model on the held-out test set.
# batch_size=1 and shuffle=False keep predictions aligned with filenames.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(TEST_PATH,
                                                  batch_size=1,
                                                  target_size=SIZE,
                                                  shuffle=False,
                                                  classes=labels,
                                                  class_mode='categorical')
filenames = test_generator.filenames
nb_samples = len(filenames)
loss, acc = model.evaluate(test_generator, steps=nb_samples, verbose=1)
print('accuracy test: ', acc)
print('loss test: ', loss)

predictions = model.predict(test_generator)
files = test_generator.filenames
class_dict = test_generator.class_indices  # a dictionary of the form class name: class index
rev_dict = {value: key for key, value in class_dict.items()}  # class index -> class name
for i, p in enumerate(predictions):
    index = np.argmax(p)
    klass = rev_dict[index]
    prob = p[index]
11.显示图片
# Show a 3x4 grid of test images with their predicted labels.
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(12, 10))
idx = 0
for i in range(3):
    for j in range(4):
        predicted_label = labels[np.argmax(predictions[idx])]
        ax[i, j].set_title(f"{predicted_label}")
        # With batch_size=1 each batch image array is (1, 240, 240, 3);
        # reshape drops the batch dimension for imshow.
        ax[i, j].imshow(test_generator[idx][0].reshape(240, 240, 3))
        ax[i, j].axis("off")
        # Step by 200 so the grid samples different classes;
        # assumes the test set holds at least 2201 images — TODO confirm.
        idx += 200
# plt.tight_layout()
plt.suptitle("Test Dataset Predictions", fontsize=20)
plt.show()
#预测比较准确
(四)收获:本次的程序设计主要内容是机器学习,通过本次课程设计,使我对机器学习的理解更加深刻,同时使我掌握了机器学习的步骤:1.提出问题;2.理解数据;3.导入数据;4.查看数据;5.数据清洗(数据预处理:缺失值处理、重复值处理、数据类型的转换、字符串数据的规整)、特征提取(特征工程)、特征选择;6.构建模型;7.选择算法(逻辑回归(Logistic Regression)、随机森林(Random Forests)、支持向量机(Support Vector Machines)、梯度提升分类器(Gradient Boosting Classifier)、K近邻(K-Nearest Neighbors));8.评估模型;9.撰写报告。
总结:这次实验的缺陷:识别的水果种类比较少,可以适当改进升级以提高可识别的种类;训练次数较少,精度不够高。
改进:可以进行多次训练,提高次数
#全代码
# Full script: train a CNN that classifies fruit images (e.g. fresh vs. rotten).

import os
import shutil

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from tensorflow.keras.models import Sequential, Model
from keras.preprocessing.image import ImageDataGenerator

TRAIN_PATH = './chenhuangyu/data/dataset/train/'  # training images, one folder per class
TEST_PATH = './chenhuangyu/data/dataset/test/'    # test images, same layout
SIZE = (240, 240)                                 # every image is resized to 240x240

# Each sub-directory name under TRAIN_PATH is one class label.
labels = [class_ for class_ in os.listdir(TRAIN_PATH)]
NUM_LABELS = len(labels)
print(labels)

# Rescale pixels to [0, 1]; reserve 10% of the training data for validation.
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.1)
test_datagen = ImageDataGenerator(rescale=1./255)
train_dataset = datagen.flow_from_directory(batch_size=32,
                                            directory=TRAIN_PATH,
                                            shuffle=True,
                                            classes=labels,
                                            target_size=SIZE,
                                            subset="training",
                                            class_mode='categorical')
val_dataset = datagen.flow_from_directory(batch_size=32,
                                          directory=TRAIN_PATH,
                                          shuffle=True,
                                          classes=labels,
                                          target_size=SIZE,
                                          subset="validation",
                                          class_mode='categorical')

# Preview a 3x4 grid of training images with their labels.
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(15, 12))
idx = 0
for i in range(3):
    for j in range(4):
        label = labels[np.argmax(train_dataset[0][1][idx])]
        ax[i, j].set_title(f"{label}")
        ax[i, j].imshow(train_dataset[0][0][idx][:, :, :])
        ax[i, j].axis("off")
        idx += 1
plt.suptitle("Sample Training Images", fontsize=21)
plt.show()

# Build the CNN: three Conv/Pool/Dropout blocks followed by a dense classifier.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                                 input_shape=(240, 240, 3)))
model.add(tf.keras.layers.MaxPool2D((2, 2)))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D((2, 2)))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPool2D((2, 2)))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(64, activation='relu'))
# One output per class, derived from the dataset instead of hard-coding 6.
model.add(tf.keras.layers.Dense(NUM_LABELS, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

# Train for 12 epochs; Model.fit accepts generators (fit_generator is deprecated).
history = model.fit(train_dataset,
                    steps_per_epoch=len(train_dataset),
                    epochs=12,
                    validation_data=val_dataset,
                    validation_steps=len(val_dataset))

# Accuracy curves (training vs. validation).
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(['Acc', 'Val'], loc='lower right')
plt.show()

# Loss curves. NOTE: the original plotted history.history['lr'], which only
# exists when a learning-rate callback (e.g. ReduceLROnPlateau) is attached and
# therefore raises KeyError here; plotting loss/val_loss matches the legend.
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['loss', 'Val'], loc='upper right')
plt.show()

# Evaluate on the test set; batch_size=1 and shuffle=False keep predictions
# aligned with test_generator.filenames.
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(TEST_PATH,
                                                  batch_size=1,
                                                  target_size=SIZE,
                                                  shuffle=False,
                                                  classes=labels,
                                                  class_mode='categorical')
filenames = test_generator.filenames
nb_samples = len(filenames)
loss, acc = model.evaluate(test_generator, steps=nb_samples, verbose=1)
print('accuracy test: ', acc)
print('loss test: ', loss)

predictions = model.predict(test_generator)
files = test_generator.filenames
class_dict = test_generator.class_indices  # a dictionary of the form class name: class index
rev_dict = {value: key for key, value in class_dict.items()}  # class index -> class name
for i, p in enumerate(predictions):
    index = np.argmax(p)
    klass = rev_dict[index]
    prob = p[index]

# Show a 3x4 grid of test images with their predicted labels.
# idx steps by 200 to sample across classes; assumes >= 2201 test images — TODO confirm.
fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(12, 10))
idx = 0
for i in range(3):
    for j in range(4):
        predicted_label = labels[np.argmax(predictions[idx])]
        ax[i, j].set_title(f"{predicted_label}")
        ax[i, j].imshow(test_generator[idx][0].reshape(240, 240, 3))
        ax[i, j].axis("off")
        idx += 200
# plt.tight_layout()
plt.suptitle("Test Dataset Predictions", fontsize=20)
plt.show()
标签:layers,水果,plt,keras,--,腐烂,add,tf,model From: https://www.cnblogs.com/chenhuangyu/p/17442038.html