A multi-layer perceptron (MLP) defines the most complex architecture among feed-forward artificial neural networks, as it is built from multiple layers of perceptrons. [Figure: schematic diagram of multi-layer perceptron learning]
MLP networks are usually used in a supervised-learning setting, and the typical learning algorithm for an MLP is known as the backpropagation algorithm.
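Backpropagation amounts to applying the chain rule to the network's loss with respect to each weight. In TensorFlow 1.x graph mode (which the example below uses), the optimizer performs this automatically via tf.gradients. The following toy sketch, with hypothetical variable names and a one-layer network, illustrates what a single gradient-descent step does internally; it is an illustration, not part of the original tutorial:

import tensorflow as tf

# A toy one-layer network: loss = mean((xW + b - y)^2)
x = tf.placeholder("float", [None, 2])
y = tf.placeholder("float", [None, 1])
W = tf.Variable(tf.random_normal([2, 1]))
b = tf.Variable(tf.zeros([1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, W) + b - y))

# tf.gradients applies the chain rule (backpropagation) through the graph
grad_W, grad_b = tf.gradients(loss, [W, b])

# One manual gradient-descent step; this is equivalent to what
# tf.train.GradientDescentOptimizer(0.01).minimize(loss) builds for you
train_step = [W.assign_sub(0.01 * grad_W), b.assign_sub(0.01 * grad_b)]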
We will now focus on an MLP implementation for an image-classification problem, using the MNIST data set.
# Import the MNIST data set
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

import tensorflow as tf
import matplotlib.pyplot as plt

# Training parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 100
display_step = 1

# Network parameters
n_hidden_1 = 256  # number of features in the first hidden layer
n_hidden_2 = 256  # number of features in the second hidden layer
n_input = 784     # MNIST data input (image shape: 28*28)
n_classes = 10    # MNIST total classes (digits 0-9)

# tf graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# Weights and bias, layer 1
h = tf.Variable(tf.random_normal([n_input, n_hidden_1]))
bias_layer_1 = tf.Variable(tf.random_normal([n_hidden_1]))
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, h), bias_layer_1))

# Weights and bias, layer 2
w = tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]))
bias_layer_2 = tf.Variable(tf.random_normal([n_hidden_2]))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, w), bias_layer_2))

# Weights and bias, output layer
output = tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
bias_output = tf.Variable(tf.random_normal([n_classes]))
output_layer = tf.matmul(layer_2, output) + bias_output

# Cost function
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=output_layer, labels=y))

# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Plot settings
avg_set = []
epoch_set = []

# Initialize the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)

        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using the batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys}) / total_batch

        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
        avg_set.append(avg_cost)
        epoch_set.append(epoch + 1)

    print("Training phase finished")

    plt.plot(epoch_set, avg_set, 'o', label='MLP Training phase')
    plt.ylabel('cost')
    plt.xlabel('epoch')
    plt.legend()
    plt.show()

    # Test the model
    correct_prediction = tf.equal(tf.argmax(output_layer, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Model Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
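Note that the input_data helper lives in tensorflow.examples.tutorials.mnist, which exists only in TensorFlow 1.x. For readers on TensorFlow 2.x, the following is a minimal sketch of the same two-hidden-layer MLP (784 -> 256 -> 256 -> 10, sigmoid hidden activations, Adam optimizer) written with tf.keras; it is an adaptation for newer versions, not part of the original tutorial:

import tensorflow as tf

# Load MNIST via tf.keras and flatten the 28*28 images to 784 features
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Same topology as above: 784 -> 256 -> 256 -> 10
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation="sigmoid", input_shape=(784,)),
    tf.keras.layers.Dense(256, activation="sigmoid"),
    tf.keras.layers.Dense(10),  # logits; the loss applies the softmax
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.fit(x_train, y_train, batch_size=100, epochs=20)
print("Model Accuracy:", model.evaluate(x_test, y_test)[1])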
When run, the code above prints the average training cost for each epoch, displays the cost-versus-epoch plot, and finally reports the model accuracy on the MNIST test set. [Output: per-epoch cost log, training-cost plot, and final "Model Accuracy" value]