import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, activation_function=None):
    # inputs: the input tensor; in_size: input dimension; out_size: output dimension
    # activation_function=None means the layer is purely linear
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # tf.random_normal creates a random matrix of weights;
    # by convention the name is capitalized because it is a matrix
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # a row vector of shape [1, out_size]; biases are usually initialized
    # to a small positive value rather than 0, so 0.1 is added
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # the pre-activation (not yet activated) value is stored in Wx_plus_b
    if activation_function is None:
        # if activation_function is None the layer is linear, so output Wx_plus_b directly
        outputs = Wx_plus_b
    else:
        # otherwise pass Wx_plus_b through the activation function
        outputs = activation_function(Wx_plus_b)
    return outputs

# training data: y = x^2 - 0.5 plus Gaussian noise
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

# hidden layer with 10 ReLU units, then a linear output layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)
# the network is now defined; prediction holds its output

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
# minimize the loss with gradient descent; learning rate is less than 1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# initialize all variables
# (tf.initialize_all_variables() is deprecated; use global_variables_initializer())
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(1000):  # train for 1000 steps
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:  # print the loss every 50 steps
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
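Note that the script above uses the TensorFlow 1.x graph/Session API (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer). If only TensorFlow 2.x is installed, a common workaround is to go through the tf.compat.v1 compatibility layer; the following is a minimal sketch under that assumption, with the rest of the script left as written:

# assumption: TensorFlow 2.x environment, running the 1.x-style script above
import tensorflow.compat.v1 as tf
import numpy as np

tf.disable_eager_execution()  # restore the graph + Session execution model
# ...the remainder of the script (add_layer, placeholders, loss, Session,
# training loop) runs unchanged, since tf now points at the 1.x-compatible API.

After training, the fitted curve can be inspected by feeding new inputs through the same graph, e.g. sess.run(prediction, feed_dict={xs: some_test_x}) with some_test_x shaped [N, 1].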