The RNN function is defined as follows:
def RNN(X, weights_rnn, biases_rnn, n_inputs, n_steps, n_hidden_units, batch_size=None):
    # hidden layer for input to cell
    ########################################
    # X ==> (batch * n_steps, n_inputs)
    X = tf.reshape(X, [-1, n_inputs])
    # project into the hidden dimension
    # X_in ==> (batch * n_steps, n_hidden_units)
    X_in = tf.matmul(X, weights_rnn['in']) + biases_rnn['in']
    # X_in ==> (batch, n_steps, n_hidden_units)
    X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])

    # cell
    ##########################################
    # basic LSTM cell
    # if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    #     cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
    # else:
    #     cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units)
    # attn_cell() is defined elsewhere and returns a single LSTM cell
    cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(1)], state_is_tuple=True)
    # the LSTM state is a tuple (c_state, h_state)
    init_state = cell.zero_state(batch_size, dtype=tf.float32)

    # run the cell over all time steps with tf.nn.dynamic_rnn
    outputs, final_state = tf.nn.dynamic_rnn(cell, X_in, initial_state=init_state, time_major=False)

    # unpack outputs to a list [(batch, n_hidden_units), ...] of length n_steps
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        outputs = tf.unpack(tf.transpose(outputs, [1, 0, 2]))
    else:
        outputs = tf.unstack(tf.transpose(outputs, [1, 0, 2]))
    # use the output of the last time step for classification
    results = tf.matmul(outputs[-1], weights_rnn['out']) + biases_rnn['out']  # shape = (batch, n_classes)
    return results
Here is how I call the RNN function:
n_inputs = 52
n_steps = 10          # time steps
n_hidden_units = 100  # neurons in hidden layer
n_classes = 22

x_rnn = tf.placeholder(tf.float32, [None, n_steps, n_inputs])

weights_rnn = {
    'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
    'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases_rnn = {
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
y_rnn = RNN(x_rnn, weights_rnn, biases_rnn, n_inputs, n_steps, n_hidden_units, batch_size=x_rnn.shape[0])
Passing batch_size as a tensor rather than a scalar gives an error:

line 430, in RNN
    init_state = cell.zero_state(batch_size, dtype=tf.float32)
ValueError: Provided a prefix or suffix of None: ? and 100
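
For illustration, here is a quick check of the placeholder's static shape at graph-construction time (this check is mine, assuming the same placeholder as above); the unknown batch dimension is where the "None: ?" in the error comes from:

print(x_rnn.shape)      # (?, 10, 52) -- static shape; the batch dimension is unknown
print(x_rnn.shape[0])   # ? -- a Dimension whose value is None, not a usable scalar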
Does anyone have any idea how to fix this?
Answer 0 (score: 1)
I think you need the dynamic shape of x_rnn rather than its static shape. Because the placeholder's first dimension is declared as None, x_rnn.shape[0] is an unknown Dimension at graph-construction time, whereas tf.shape(x_rnn)[0] is a scalar tensor evaluated at run time. You can replace x_rnn.shape[0] with tf.shape(x_rnn)[0].
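
A minimal sketch of the corrected call, assuming everything else above stays the same (batch_size_t is just my name for the intermediate value):

batch_size_t = tf.shape(x_rnn)[0]   # scalar int32 tensor, evaluated at run time
y_rnn = RNN(x_rnn, weights_rnn, biases_rnn, n_inputs, n_steps, n_hidden_units,
            batch_size=batch_size_t)

cell.zero_state accepts either a Python integer or a scalar tensor for the batch size, so the runtime value from tf.shape works here; the static x_rnn.shape[0] fails because the placeholder's batch dimension was declared as None.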