61.
he_init = slim.variance_scaling_initializer()
xavier_init = slim.xavier_initializer()
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=he_init,
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
        net = slim.conv2d(inputs, 64, [11, 11], 4, scope='conv1')
        net = slim.conv2d(net, 256, [5, 5],
                          weights_initializer=xavier_init,
                          scope='conv2')
        net = slim.fully_connected(net, 1000,
                                   activation_fn=None, scope='fc')
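For comparison, a minimal sketch (same names as above) of what conv1 would look like with every default supplied by the two arg_scope blocks written out by hand; this is exactly the repetition arg_scope removes:

net = slim.conv2d(inputs, 64, [11, 11], 4,  # explicit stride 4 overrides the scoped stride=1
                  activation_fn=tf.nn.relu,
                  weights_initializer=he_init,
                  weights_regularizer=slim.l2_regularizer(0.0005),
                  padding='SAME',
                  scope='conv1')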
64.
# Define the loss functions and get the total loss.
loss1 = slim.losses.softmax_cross_entropy(pred1, label1)
loss2 = slim.losses.mean_squared_error(pred2, label2)
# The following two lines have the same effect:
total_loss = loss1 + loss2
total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
# If you also want to include the regularization losses:
reg_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss = loss1 + loss2 + reg_loss
# or
total_loss = slim.losses.get_total_loss()
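A quick sketch of why the two forms above agree: each slim.losses.* call also registers its loss in the tf.GraphKeys.LOSSES collection, and get_total_loss() simply sums that collection (plus, by default, the regularization losses):

# Should print the loss ops created by the two slim.losses calls above.
for loss in tf.get_collection(tf.GraphKeys.LOSSES):
    print(loss.op.name)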
82. import tensorflow as tf
slim = tf.contrib.slim
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
max_steps = 10000
batch_size = 128
lr = 0.001
keep_prob = 0.5
weight_decay = 0.0004
logs_path = "/tmp/tensorflow_logs/example"
def my_arg_scope(is_training, weight_decay):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        stride=1, padding="SAME"):
        with slim.arg_scope([slim.dropout],
                            is_training=is_training) as arg_sc:
            return arg_sc
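A minimal usage sketch (the `images` tensor is an assumption): re-entering the returned scope gives any conv2d created inside it the ReLU / He-initialization / L2 defaults defined above:

with slim.arg_scope(my_arg_scope(is_training=True, weight_decay=0.0004)):
    h = slim.conv2d(images, 32, [3, 3], scope="demo")  # inherits all scoped defaults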
83. def my_net(x, keep_prob, outputs_collections="my_net"):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=outputs_collections):
        net = slim.conv2d(x, 64, [3, 3], scope="conv1")
        net = slim.max_pool2d(net, [2, 2], scope="pool1")
        net = slim.conv2d(net, 128, [3, 3], scope="conv2")
        net = slim.max_pool2d(net, [2, 2], scope="pool2")
        net = slim.conv2d(net, 256, [3, 3], scope="conv3")
        # global average pooling
        net = tf.reduce_mean(net, [1, 2], name="pool3", keep_dims=True)
        net = slim.dropout(net, keep_prob, scope="dropout3")
        net = slim.conv2d(net, 1024, [1, 1], scope="fc4")
        net = slim.dropout(net, keep_prob, scope="dropout4")
        net = slim.conv2d(net, 10, [1, 1],
                          activation_fn=None, scope="fc5")
        end_points = slim.utils.convert_collection_to_dict(
            outputs_collections)
        return tf.reshape(net, [-1, 10]), end_points
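A small sketch of what the returned end_points gives you: convert_collection_to_dict maps each collected layer's scope name to its output tensor, which is handy for checking intermediate shapes (only the slim layers are collected, so the raw tf.reduce_mean "pool3" will not appear):

logits, end_points = my_net(x, keep_prob=1.0)  # x as defined in the next snippet
for name, tensor in end_points.items():
    print(name, tensor.get_shape().as_list())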
85. x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
with slim.arg_scope(my_arg_scope(is_training, weight_decay)):
    net, end_pts = my_net(x, keep_prob)
    pred = slim.softmax(net, scope="prediction")
with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss
with tf.variable_scope("Adam"):
    opt = tf.train.AdamOptimizer(lr)
    # Op to compute the gradient of every trainable variable
    grads = tf.gradients(loss_op, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradients
    apply_grads = opt.apply_gradients(grads_and_vars=grads)
with tf.variable_scope("accuracy"):
    correct_op = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
    acc_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))
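Worth noting: the explicit tf.gradients / apply_gradients pair above is equivalent to the one-line shortcut below; the split form is kept only so the per-variable gradients stay accessible for the histogram summaries that follow.

train_op = opt.minimize(loss_op)  # same update as apply_grads, without exposing grads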
87. # Create a summary to monitor loss and accuracy
summ_loss = tf.summary.scalar("loss", loss_op)
summ_acc = tf.summary.scalar("accuracy_test", acc_op)
# Create summaries to visualize weights and gradients
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var, collections=["my_summ"])
for grad, var in grads:
    tf.summary.histogram(var.name + "/gradient", grad,
                         collections=["my_summ"])
summ_wg = tf.summary.merge_all(key="my_summ")

sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(logs_path,
                                       graph=sess.graph)
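A sketch of the training loop these ops suggest (the evaluation interval and the use of the full test set are assumptions, not from the source):

for step in range(max_steps):
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    _, s = sess.run([apply_grads, summ_loss],
                    feed_dict={x: batch_x, y: batch_y, is_training: True})
    summary_writer.add_summary(s, step)
    if step % 100 == 0:  # periodically log test accuracy
        s = sess.run(summ_acc, feed_dict={x: mnist.test.images,
                                          y: mnist.test.labels,
                                          is_training: False})
        summary_writer.add_summary(s, step)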