使用TensorFlow构建BNN。我试图用-1和1互换来填充我的张量,如FBNA所示:完全二值化的神经网络加速器图3.下一个2D数组应具有相反的-1,1模式。我可以在嵌套的for循环中实现此目标,但这非常慢。
谁能找到更快的方法?
谢谢
答案 0(得分:0)
好的,我想出了一个解决方案,但是它可能不是很漂亮(我是TensorFlow的新手)。这需要大量的反复试验。此外,它仅支持通道长度= 1、3或任何偶数通道长度的4D张量。
如果有人可以做一个更漂亮/更通用的版本,请发表,谢谢。
def OddEvenPad(X):
    """Pad a 4D NHWC tensor with a one-pixel border of alternating +1/-1.

    The border forms a checkerboard: the value written at padded position
    (b, h, w, c) is (-1)**(b + h + w + c), so the top-left border element of
    batch 0, channel 0 is +1 and neighbouring border values alternate sign
    along every axis (rows, columns, channels and batch). This matches the
    odd/even padding pattern of the original tile-based version for its
    supported cases, and fixes its gaps:

    * works for ANY batch size, height, width and channel count (the
      original silently broke on odd batch/height/width and on channel
      counts other than 1, 3 or even — its `else` branch only printed a
      message and then crashed on mis-shaped concats);
    * creates no `tf.Variable` and never calls
      `tf.global_variables_initializer()`, so it needs no active session
      and does not clobber other variables' state;
    * the batch axis alternates sign uniformly (the original alternated it
      in the 1- and 3-channel paths but tiled identical copies in the
      even-channel path).

    Args:
        X: 4D tensor of shape (batch, height, width, channels).
           Assumed to be a float dtype, as in the original — TODO confirm
           against callers if integer feature maps are ever passed.

    Returns:
        Tensor of shape (batch, height + 2, width + 2, channels): X with a
        one-element +1/-1 checkerboard border around each spatial map.
    """
    # Use the dynamic shape so unknown (None) dimensions also work.
    shape = tf.shape(X)
    num_batch, height, width = shape[0], shape[1], shape[2]
    num_channels = shape[3]

    def _alternating(n):
        # [+1, -1, +1, -1, ...] of length n, cast to X's dtype.
        return tf.cast(1 - 2 * (tf.range(n) % 2), X.dtype)

    # Broadcast four 1-D alternating vectors into a full 4D checkerboard
    # covering the padded extent: sign(b, h, w, c) = (-1)**(b + h + w + c).
    sign_b = tf.reshape(_alternating(num_batch), [-1, 1, 1, 1])
    sign_h = tf.reshape(_alternating(height + 2), [1, -1, 1, 1])
    sign_w = tf.reshape(_alternating(width + 2), [1, 1, -1, 1])
    sign_c = tf.reshape(_alternating(num_channels), [1, 1, 1, -1])
    checkerboard = sign_b * sign_h * sign_w * sign_c

    # Zero-pad X by one pixel on each spatial side, build a mask that is
    # 1 on the interior and 0 on the new border, then fill the border
    # cells from the checkerboard. No per-case branching needed.
    spatial_pad = [[0, 0], [1, 1], [1, 1], [0, 0]]
    padded_X = tf.pad(X, spatial_pad)
    interior = tf.pad(tf.ones_like(X), spatial_pad)
    return padded_X + checkerboard * (1 - interior)