
How to build a neural network in TensorFlow


import tensorflow as tf
import numpy as np

#TF1-style graph code: disable eager execution before building the graph
tf.compat.v1.disable_eager_execution()

#create data: y = 0.1*x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1+0.3

"""create tensorflow structure start"""
Weights = tf.Variable(tf.random.uniform([1],-1.0,1.0))
biases = tf.Variable(tf.zeros([1]))

y = Weights*x_data + biases
loss = tf.reduce_mean(tf.square(y-y_data))

#build optimizer, 0.5 is the learning rate
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss, var_list=[Weights, biases])

#initialization
init = tf.compat.v1.global_variables_initializer()
"""create tensorflow structure end"""

#two ways to activate a Session
#method 1
sess = tf.compat.v1.Session()
sess.run(init)
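
Method 1 above only runs the initializer; to actually fit Weights and biases you still need a training loop. A minimal sketch (201 steps is an arbitrary illustrative choice):

for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        #Weights should converge toward 0.1 and biases toward 0.3
        print(step, sess.run(Weights), sess.run(biases))
sess.close()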


#method2
matrix1 = tf.constant([[3,3]])
matrix2 = tf.constant([[2],[2]])

product = tf.matmul(matrix1,matrix2) # matrix multiply, like np.dot(m1,m2)

with tf.compat.v1.Session() as sess:
    result2 = sess.run(product)
    print(result2)
    
[[12]]
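
The output is [[12]] because multiplying the (1×2) matrix by the (2×1) matrix gives 3*2 + 3*2 = 12.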
"""Variable"""

import tensorflow as tf

state = tf.Variable(0,name='counter')
#print(state.name)
one = tf.constant(1)

new_value = tf.add(state,one)
update = tf.compat.v1.assign(state,new_value)

init = tf.compat.v1.global_variables_initializer() #must be run whenever variables are defined

with tf.compat.v1.Session() as sess:
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))
        
1
2
3
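
As a side note, the same counter can be written more compactly with tf.compat.v1.assign_add, which increments the variable and returns the new value in one op. A minimal variant sketch:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

state = tf.Variable(0, name='counter')
update = tf.assign_add(state, 1)  #state += 1; the op returns the updated value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update))  #prints 1, 2, 3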

 

"""placeholder是Tensorflow中的占位符,暂时存储变量"""
"""如果想要从外部传入date,就需要tf.placeholder(),然后用sess.run(paraA,feed_dict={})的形式传递和数据"""

import tensorflow.compat.v1 as tf
import tensorflow

tf.disable_eager_execution() #placeholders require graph mode

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)

output = tensorflow.multiply(input1,input2)

with tf.Session() as sess:
    print(sess.run(output,feed_dict={input1:[7],input2:[2.]}))
        
[14.]
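
A placeholder can also be given an explicit shape so TensorFlow validates the data fed to it. A small sketch (the [None, 2] shape is chosen purely for illustration):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.placeholder(tf.float32, shape=[None, 2])  #any number of rows, 2 columns
row_sum = tf.reduce_sum(a, axis=1)               #sum over each row

with tf.Session() as sess:
    print(sess.run(row_sum, feed_dict={a: [[1., 2.], [3., 4.]]}))  #[3. 7.]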
        
"""添加层def add_layer()"""

import tensorflow.compat.v1 as tf

def add_layer(inputs,in_size,out_size,activation_function=None):
    Weights = tf.Variable(tf.random.normal([in_size,out_size]))
    biases = tf.Variable(tf.zeros([1,out_size])+0.1)
    Wx_plus_b = tf.matmul(inputs,Weights)+biases # y = W*x + b
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

    
import tensorflow.compat.v1 as tf
# import tensorflow as tf
tf.disable_eager_execution()
import numpy as np

def add_layer(inputs,in_size,out_size,activation_function=None):
    Weights = tf.Variable(tf.random.normal([in_size,out_size]))
    biases = tf.Variable(tf.zeros([1,out_size])+0.1)
    Wx_plus_b = tf.matmul(inputs,Weights)+biases # y=wx+b
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


x_data = np.linspace(-1,1,300,dtype=np.float32)[:,np.newaxis]
noise = np.random.normal(0,0.05,x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32,[None,1])
ys = tf.placeholder(tf.float32,[None,1])
l1 = add_layer(xs,1,10,activation_function=tf.nn.relu)
prediction = add_layer(l1,10,1,activation_function=None)

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),axis=1))

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for i in range(100):
    sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
    if i % 50 == 0:
        print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
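
After training, the same session can evaluate prediction on new inputs through the xs placeholder. A minimal sketch (reusing x_data as the test input for simplicity):

pred = sess.run(prediction, feed_dict={xs: x_data})
print(pred[:5])  #first few predictions; should roughly follow y = x^2 - 0.5
sess.close()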

Reference: https://zhuanlan.zhihu.com/p/373664997
