
TF-Slim in TensorFlow: A Detailed Guide



1. Defining Variables

 

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim


# Model variable
weights = slim.model_variable('weights',
                              shape=[1, 1, 3, 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05),
                              device='/GPU:0')

# Regular (non-model) variable
my_var = slim.variable('my_var',
                       shape=[10, 1],
                       initializer=tf.zeros_initializer())

# get_variables returns all variables
regular_variables_and_model_variables = slim.get_variables()


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(weights))
    print(sess.run(my_var))
    print(sess.run(regular_variables_and_model_variables))

# Model variables are the ones persisted when the model is saved; variables such as global_step are regular (non-model) variables.
# slim lets you declare the device a variable lives on as well as its regularization and initialization rules. Note the variable-getter functions too: get_variables returns all variables.
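For a quick look at the related getters, here is a minimal sketch reusing the variables defined above (slim also provides get_model_variables and get_variables_by_name):

# Model variables only ('weights' above qualifies, 'my_var' does not).
model_variables = slim.get_model_variables()

# Look up variables by name; returns a list of matches.
weights_list = slim.get_variables_by_name('weights')

for v in model_variables:
    print(v.op.name)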

2. Convolution Operations

  2.1 Traditional convolution

# Traditional convolution
input = ...
with tf.name_scope('conv1_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)

 2.2 Convolution with slim

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim

# The slim implementation: a single call covering the weights, the bias add,
# and the default ReLU activation from the traditional version above
net = slim.conv2d(input, 128, [3, 3], scope='conv1_1')
'''
Sketch of the underlying code:
@staticmethod
def conv2d(features, weight):
    """Produces a convolutional layer that filters an image subregion

    :param features: The layer input.
    :param weight: The size of the layer filter.
    :return: Returns a convolutional layer.
    """
    return tf.nn.conv2d(features, weight, strides=[1, 1, 1, 1], padding='SAME')
'''
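slim.conv2d also exposes the usual knobs (stride, padding, activation) as keyword arguments. A small sketch with illustrative values:

# Stride 2, no padding, and tanh instead of the default ReLU activation.
net = slim.conv2d(input, 128, [3, 3], stride=2, padding='VALID',
                  activation_fn=tf.nn.tanh, scope='conv1_2')

# Disable the nonlinearity entirely when a raw linear response is needed.
net = slim.conv2d(net, 128, [1, 1], activation_fn=None, scope='conv1_3')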

2.3 Defining identical layers with slim

 

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim
'''
The repeat op cuts down on boilerplate when the layers are identical.

stack handles the case where the kernel sizes or output depths differ between layers.
'''

#1.

'''
Suppose we define three identical convolutional layers:
input = slim.conv2d(input, 256, [3, 3], scope='conv3_1')
input = slim.conv2d(input, 256, [3, 3], scope='conv3_2')
input = slim.conv2d(input, 256, [3, 3], scope='conv3_3')
input = slim.max_pool2d(input, [2, 2], scope='pool2')

'''
# The repeat op in slim cuts the boilerplate; it also unrolls the scopes to 'conv3/conv3_1' through 'conv3/conv3_3':
net = slim.repeat(input, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool2')



#2.
'''
Suppose we define three fully connected layers:
# Verbose way:
x = slim.fully_connected(x, 32, scope='fc/fc_1')
x = slim.fully_connected(x, 64, scope='fc/fc_2')
x = slim.fully_connected(x, 128, scope='fc/fc_3')

'''
# With the stack op:
x = slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')


#3.
# Verbose way:
x = slim.conv2d(x, 32, [3, 3], scope='core/core_1')
x = slim.conv2d(x, 32, [1, 1], scope='core/core_2')
x = slim.conv2d(x, 64, [3, 3], scope='core/core_3')
x = slim.conv2d(x, 64, [1, 1], scope='core/core_4')

# Equivalent with stack:
x = slim.stack(x, slim.conv2d, [(32, [3, 3]), (32, [1, 1]), (64, [3, 3]), (64, [1, 1])], scope='core')

2.4 arg_scope in slim

    If your network repeats the same arguments over and over, as below:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim

'''
arg_scope lets you define shared arguments once instead of repeating them.
'''


net = slim.conv2d(inputs, 64, [11, 11], 4, padding='SAME',
                  weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                  weights_regularizer=slim.l2_regularizer(0.0005), scope='conv1')
net = slim.conv2d(net, 128, [11, 11], padding='VALID',
                  weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                  weights_regularizer=slim.l2_regularizer(0.0005), scope='conv2')
net = slim.conv2d(net, 256, [11, 11], padding='SAME',
                  weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                  weights_regularizer=slim.l2_regularizer(0.0005), scope='conv3')

# With arg_scope, the shared arguments are factored out
with slim.arg_scope([slim.conv2d], padding='SAME',
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    net = slim.conv2d(inputs, 64, [11, 11], scope='conv1')
    net = slim.conv2d(net, 128, [11, 11], padding='VALID', scope='conv2')
    net = slim.conv2d(net, 256, [11, 11], scope='conv3')

# Nested arg_scopes covering multiple layer types
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
        net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
        net = slim.conv2d(net, 256, [5, 5],
                          weights_initializer=tf.truncated_normal_initializer(stddev=0.03),
                          scope='conv2')
        net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc')
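If the same set of defaults is needed across several model-building functions, the scope can be captured once and re-entered later. A minimal sketch of this pattern (the function name is illustrative):

def my_arg_scope():
    # Capture the configured scope so callers can re-enter it.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(0.0005)) as sc:
        return sc

with slim.arg_scope(my_arg_scope()):
    net = slim.conv2d(inputs, 64, [3, 3], scope='conv1')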



# Defining the VGG-16 network
def vgg16(inputs):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')  # two conv layers with 3x3 kernels
        net = slim.max_pool2d(net, [2, 2], scope='pool1')  # 2x2 max pooling
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], scope='pool3')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [2, 2], scope='pool4')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        net = slim.max_pool2d(net, [2, 2], scope='pool5')
        net = slim.fully_connected(net, 4096, scope='fc6')  # fully connected layers
        net = slim.dropout(net, 0.5, scope='dropout6')
        net = slim.fully_connected(net, 4096, scope='fc7')
        net = slim.dropout(net, 0.5, scope='dropout7')  # dropout
        net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc8')
    return net
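A hypothetical usage sketch for the function above, assuming a batch of standard 224x224 RGB inputs:

# Build the VGG graph on a placeholder batch.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits = vgg16(images)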

3. Pre-built Networks in slim

from tensorflow.contrib.slim.python.slim.nets import alexnet
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.contrib.slim.python.slim.nets import overfeat
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
from tensorflow.contrib.slim.python.slim.nets import vgg
from tensorflow.python.util.all_util import make_all

import tensorflow as tf

slim = tf.contrib.slim
vgg = tf.contrib.slim.nets.vgg

# Load the images and labels.
images, labels = ...

# Create the model.
predictions, _ = vgg.vgg_16(images)

# Define the loss functions and get the total loss.
loss = slim.losses.softmax_cross_entropy(predictions, labels)
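Note that vgg.vgg_16 returns a pair: the logits plus a dict of named intermediate activations. A small sketch of inspecting them (reusing the images tensor above):

# end_points maps layer names to their output tensors.
predictions, end_points = vgg.vgg_16(images)
for name, tensor in end_points.items():
    print(name, tensor.get_shape())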

4. Losses

 

A loss function defines a quantity that we want to minimize. For classification problems, this is typically the cross-entropy between the true distribution and the predicted probability distribution over classes. For regression problems, it is often the sum of squared differences between the predicted and true values.

Some models, such as multi-task learning models, use several loss functions at once. In other words, the loss that is ultimately minimized is the sum of various other losses. For example, consider a model that predicts both the scene type of an image and the camera depth at each pixel; its loss is the sum of the classification loss and the depth-prediction loss.
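As a concrete illustration of the two loss forms just described, a minimal sketch in plain TensorFlow (the tensors are toy values):

# Classification: cross-entropy between one-hot labels and predicted logits.
labels = tf.constant([[0.0, 1.0]])
logits = tf.constant([[2.0, 0.5]])
xent = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)

# Regression: sum of squared differences between predictions and ground truth.
y_true = tf.constant([1.0, 2.0])
y_pred = tf.constant([1.5, 1.0])
sum_of_squares = tf.reduce_sum(tf.square(y_pred - y_true))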
 

TF-Slim provides an easy-to-use mechanism for defining and tracking losses via its losses module. Consider the simple case where we want to train the VGG network:

import tensorflow as tf

slim = tf.contrib.slim
vgg = tf.contrib.slim.nets.vgg

# Load the images and labels.
images, labels = ...

# Create the model.
predictions, _ = vgg.vgg_16(images)

# Define the loss functions and get the total loss.
loss = slim.losses.softmax_cross_entropy(predictions, labels)

In this example we first create the model (using TF-Slim's VGG implementation) and add the standard classification loss. Now consider a multi-task model that produces multiple outputs:

# Load the images and labels.
images, scene_labels, depth_labels = ...

# Create the model.
scene_predictions, depth_predictions = CreateMultiTaskModel(images)

# Define the loss functions and get the total loss.
classification_loss = slim.losses.softmax_cross_entropy(scene_predictions, scene_labels)
sum_of_squares_loss = slim.losses.sum_of_squares(depth_predictions, depth_labels)

# The following two lines have the same effect:
total_loss = classification_loss + sum_of_squares_loss
total_loss = slim.losses.get_total_loss(add_regularization_losses=False)

In this example we have two losses, added by calling slim.losses.softmax_cross_entropy and slim.losses.sum_of_squares. We can obtain the total loss either by adding them ourselves (total_loss) or by calling slim.losses.get_total_loss(). How does this work? Whenever you create a loss through TF-Slim, TF-Slim adds it to a special TensorFlow collection of losses. This lets you either manage the total loss by hand or let TF-Slim manage it for you.
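To see the collection mechanism at work, a small sketch that lists the losses registered in the graph built above:

# Losses created through slim.losses land in the tf.GraphKeys.LOSSES collection,
# which is what get_total_loss() sums over.
for loss in slim.losses.get_losses():
    print(loss.op.name)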

What if you have a custom loss function and still want TF-Slim to manage it? loss_ops.py also provides a function that adds such a loss to TF-Slim's collection. For example:

# Load the images and labels.
images, scene_labels, depth_labels, pose_labels = ...

# Create the model.
scene_predictions, depth_predictions, pose_predictions = CreateMultiTaskModel(images)

# Define the loss functions and get the total loss.
classification_loss = slim.losses.softmax_cross_entropy(scene_predictions, scene_labels)
sum_of_squares_loss = slim.losses.sum_of_squares(depth_predictions, depth_labels)
pose_loss = MyCustomLossFunction(pose_predictions, pose_labels)
slim.losses.add_loss(pose_loss) # Letting TF-Slim know about the additional loss.

# The following two ways to compute the total loss are equivalent:
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss1 = classification_loss + sum_of_squares_loss + pose_loss + regularization_loss

# (Regularization Loss is included in the total loss by default).
total_loss2 = slim.losses.get_total_loss()

In this example we can once again either produce the total loss manually, or tell TF-Slim about the additional loss and let TF-Slim handle the rest.

5. Saving and Restoring Models

With the following pattern we can restore just a subset of a model's variables:

# Create some variables.
v1 = slim.variable(name="v1", ...)
v2 = slim.variable(name="nested/v2", ...)
...

# Get list of variables to restore (which contains only 'v2').
variables_to_restore = slim.get_variables_by_name("v2")

# Create the saver which will be used to restore the variables.
restorer = tf.train.Saver(variables_to_restore)

with tf.Session() as sess:
    # Restore variables from disk.
    restorer.restore(sess, "/tmp/model.ckpt")
    print("Model restored.")

Beyond this kind of partial restoring, we can even restore checkpoint values into variables with different names.

Suppose our network defines the variable conv1/weights, while the checkpoint saved from VGG names it vgg16/conv1/weights. A straightforward load would fail (variable name not found), but we can do this:

def name_in_checkpoint(var):
    # Map the in-graph variable name to the name used in the checkpoint.
    return 'vgg16/' + var.op.name

variables_to_restore = slim.get_model_variables()
variables_to_restore = {name_in_checkpoint(var): var for var in variables_to_restore}
restorer = tf.train.Saver(variables_to_restore)

with tf.Session() as sess:
    # Restore variables from disk.
    restorer.restore(sess, "/tmp/model.ckpt")

In this way we can restore the checkpoint into variables with different names!
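slim also wraps the restore boilerplate in a convenience helper. A minimal sketch, assuming the same checkpoint path as above:

# Build a function that restores the listed variables when given a session.
init_fn = slim.assign_from_checkpoint_fn(
    "/tmp/model.ckpt",
    slim.get_model_variables(),
    ignore_missing_vars=True)  # skip checkpoint entries without a matching variable

with tf.Session() as sess:
    init_fn(sess)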

From: https://blog.51cto.com/u_15955675/6040024
