Following the explanation above, I wrote the following code:
import os
import tensorflow as tf
import numpy as np
# hyper-params
learning_rate = 0.0002
epochs = 250
batch_size = 16
N_w = 11 #number of frames concatenated together
channels = 9*N_w
drop_out = [0.5, 0.5, 0.5, 0, 0, 0, 0, 0]
# input_tensor X
X = tf.placeholder(tf.float32, [batch_size, 256, 256, channels])  # batch_size x height x width x channels (9*N_w)
def conv_down(x, N, stride, count):  # Conv [4x4, str_2] > Batch_Normalization > Leaky_ReLU
    with tf.variable_scope("conv_down_{}_{}".format(N, count)):  # N == depth of tensor
        with tf.variable_scope("conv_down_4x4_str{}".format(stride)):  # this is used for downsampling
            x = tf.layers.conv2d(x, N, kernel_size=4, strides=stride, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            x = tf.nn.relu(x)  # change it into leaky_relu in version 1.8 : now in 1.1
    return x
def conv_up(x, N, drop_rate, stride, count):  # Conv_transpose [4x4, str_2] > Batch_Normalization > DropOut > ReLU
    with tf.variable_scope("conv_up_{}_{}".format(N, count)):  # N == depth of tensor
        with tf.variable_scope("conv_up_4x4_str{}".format(stride)):
            x = tf.layers.conv2d_transpose(x, N, kernel_size=4, strides=stride, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            if drop_rate != 0:
                x = tf.nn.dropout(x, keep_prob=drop_rate)
            x = tf.nn.relu(x)
    return x
def conv_refine1(x, N, drop_rate, count):  # Conv [3x3, str_1] > Batch_Normalization > DropOut > ReLU
    with tf.variable_scope("conv_refine_1_{}_{}".format(N, count)):
        with tf.variable_scope("conv_refine_3x3_str1"):
            x = tf.layers.conv2d(x, N, kernel_size=3, strides=1, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            if drop_rate != 0:
                x = tf.nn.dropout(x, keep_prob=drop_rate)
            x = tf.nn.relu(x)
    return x
def conv_refine2(x, N, drop_rate, count):  # Conv [3x3, str_1] > Batch_Normalization > DropOut > ReLU
    with tf.variable_scope("conv_refine_2_{}_{}".format(N, count)):
        with tf.variable_scope("conv_refine_3x3_str1"):
            x = tf.layers.conv2d(x, N, kernel_size=3, strides=1, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            if drop_rate != 0:
                x = tf.nn.dropout(x, keep_prob=drop_rate)
            x = tf.nn.relu(x)
    return x
def conv_upsample(x, N, drop_rate, stride, count):
    with tf.variable_scope("conv_upsamp_{}_{}".format(N, count)):
        x = conv_up(x, 2*N, drop_rate, stride, count)
        x = conv_refine1(x, N, drop_rate, count)
        x = conv_refine2(x, N, drop_rate, count)
    return x
def biLinearDown(x, N):
    return tf.image.resize_images(x, [N, N])
def finalTanH(x):
    return tf.nn.tanh(x)
def T(x):
    # channel_output_structure
    down_channel_output = [64, 128, 256, 512, 512, 512, 512, 512]
    up_channel_output = [512, 512, 512, 512, 256, 128, 64, 3]
    biLinearDown_output = [32, 64, 128]  # for skip-connection

    # down_sampling
    conv1 = conv_down(x, down_channel_output[0], 2, 1)
    conv2 = conv_down(conv1, down_channel_output[1], 2, 2)
    conv3 = conv_down(conv2, down_channel_output[2], 2, 3)
    conv4 = conv_down(conv3, down_channel_output[3], 1, 4)
    conv5 = conv_down(conv4, down_channel_output[4], 1, 5)
    conv6 = conv_down(conv5, down_channel_output[5], 1, 6)
    conv7 = conv_down(conv6, down_channel_output[6], 1, 7)
    conv8 = conv_down(conv7, down_channel_output[7], 1, 8)

    # upsampling
    dconv1 = conv_upsample(conv8, up_channel_output[0], drop_out[0], 1, 1)
    dconv2 = conv_upsample(dconv1, up_channel_output[1], drop_out[1], 1, 2)
    dconv3 = conv_upsample(dconv2, up_channel_output[2], drop_out[2], 1, 3)
    dconv4 = conv_upsample(dconv3, up_channel_output[3], drop_out[3], 1, 4)
    dconv5 = conv_upsample(dconv4, up_channel_output[4], drop_out[4], 1, 5)
    dconv6 = conv_upsample(tf.concat([dconv5, biLinearDown(x, biLinearDown_output[0])], axis=3), up_channel_output[5], drop_out[5], 2, 6)
    dconv7 = conv_upsample(tf.concat([dconv6, biLinearDown(x, biLinearDown_output[1])], axis=3), up_channel_output[6], drop_out[6], 2, 7)
    dconv8 = conv_upsample(tf.concat([dconv7, biLinearDown(x, biLinearDown_output[2])], axis=3), up_channel_output[7], drop_out[7], 2, 8)

    # final_tanh
    T_x = finalTanH(dconv8)
    return T_x
pseudo_np = np.random.uniform(low=-1., high=1., size=[16, 256, 256, 11])
pseudo_input = tf.Variable(np.float32(pseudo_np))
T_x = T(pseudo_input)
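One quick way to confirm the static output shape (a minimal check, relying only on TF 1.x shape inference) is:

print(T_x.get_shape())  # should report (16, 256, 256, 3)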
The final variable T_x has shape (16, 256, 256, 3), so I think the dimensions are fine. But beyond the dimensions, how can I check whether the network structure itself is coded correctly?
Is there any kind of diagnostic guide for deep learning with TensorFlow?
Answer (score: 0)
Yes — welcome to the world of tensorflow, where everything is wrapped in a black box and you have no idea whether it is actually doing what you want. I think this has always been a major hurdle with the platform, and there is no great way around it. Still, here is a non-exhaustive list of options for figuring out what is going on in your network.
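For instance, one quick way to see what was actually built is to list every trainable variable in the graph together with its shape, which at least confirms that the layers and filter sizes you intended really exist (a minimal sketch, assuming the TF 1.x graph-mode API used above):

for v in tf.trainable_variables():
    print(v.name, v.get_shape())

You can also write the graph out with tf.summary.FileWriter('logs', tf.get_default_graph()) and inspect the connections visually in TensorBoard.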
Finally, I like to write simple test scripts to double-check that all my reshape/tile/concatenate operations do exactly what I expect them to do.
That means creating a small script such as:
A = tf.constant([[1, 2], [3, 4]])
B = tf.reshape(A, [-1])
sess = tf.Session()
print(sess.run(B))  # expect [1 2 3 4]
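Applied to your own graph, the same trick works for the skip connections — e.g. a toy check (just a sketch with made-up shapes) that tf.concat on axis=3 and tf.image.resize_images stack and resize the way you expect:

a = tf.ones([1, 4, 4, 2])
b = tf.zeros([1, 4, 4, 3])
c = tf.concat([a, b], axis=3)  # channel-wise concat, as in the skip connections
d = tf.image.resize_images(tf.ones([1, 8, 8, 1]), [4, 4])  # bilinear downsample, as in biLinearDown
sess = tf.Session()
print(sess.run(tf.shape(c)))  # expect [1 4 4 5]
print(sess.run(tf.shape(d)))  # expect [1 4 4 1]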
Happy debugging!