InvalidArgumentError:ConcatOp:输入的维度应匹配

时间:2018-04-21 02:27:45

标签: tensorflow rnn

在 TensorFlow 1.7 中使用 dynamic_rnn。程序起初运行正常,但到第 32 步(每次运行代码时这个步数会变化)就会出现错误。当我使用较小的批处理(batch)时,代码似乎能运行得更久一些,但错误最终仍然会出现,而且出现得更频繁。我无法找出错误的原因。

    from mapping import *


def my_input_fn(features, targets, batch_size=20, shuffle=True, num_epochs=None, sequece_lenth=None):
    """Build a tf.data input pipeline yielding (features, labels, sequence) batches.

    Args:
        features: training examples, shape [num_examples, time_steps, embedding_dim].
        targets: per-example labels.
        batch_size: number of examples per batch.
        shuffle: whether to shuffle individual examples.
        num_epochs: number of passes over the data (None = repeat forever).
        sequece_lenth: per-example true sequence lengths.

    Returns:
        Tensors (features, labels, sequence) for one batch.
    """
    ds = tf.data.Dataset.from_tensor_slices(
        (features, targets, sequece_lenth))  # warning: 2GB limit
    if shuffle:
        # Shuffle individual examples BEFORE batching; shuffling after
        # .batch() only reorders whole batches.
        ds = ds.shuffle(10000)
    # Drop the final short batch. When the dataset size is not divisible by
    # batch_size, the remainder batch has fewer rows than batch_size, and
    # dynamic_rnn's fixed-size initial state (zero_state(batch_size, ...))
    # then mismatches the input, producing
    # "ConcatOp : Dimensions of inputs should match".
    # (TF 1.7: tf.contrib.data.batch_and_drop_remainder; in TF >= 1.10 use
    # ds.batch(batch_size, drop_remainder=True).)
    ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
    ds = ds.repeat(num_epochs)
    features, labels, sequence = ds.make_one_shot_iterator().get_next()
    return features, labels, sequence


def lstm_cell(lstm_size=50):
    """Create a single BasicLSTMCell with ``lstm_size`` hidden units."""
    cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_size)
    return cell


class RnnModel:
    """Stacked-LSTM regression model trained with tf.nn.dynamic_rnn."""

    def __init__(self,
                 batch_size,
                 hidden_units,
                 time_steps,
                 num_features
                 ):
        """
        Args:
            batch_size: fixed batch size used for the LSTM zero state.
            hidden_units: list of layer sizes; one BasicLSTMCell per entry.
            time_steps: sequence length of each example.
            num_features: embedding size of each time step.
        """
        self.batch_size = batch_size
        self.hidden_units = hidden_units
        # One cell per entry of hidden_units; the entry value is that
        # layer's hidden size.
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [lstm_cell(i) for i in self.hidden_units])
        self.initial_state = stacked_lstm.zero_state(batch_size, tf.float32)
        self.model = stacked_lstm
        self.state = self.initial_state
        self.time_steps = time_steps
        self.num_features = num_features

    def loss_mean_squre(self, outputs, targets):
        """Mean squared error after mapping outputs from [-1, 1] to [0, 1]."""
        # (outputs + 1) / 2 rescales a tanh-range output to [0, 1].
        pos = tf.add(outputs, tf.ones(self.batch_size))
        eve = tf.div(pos, 2)
        error = tf.subtract(eve, targets)
        return tf.reduce_mean(tf.square(error))

    def train(self,
              num_steps,
              learningRate,
              input_fn,
              inputs,
              targets,
              sequenceLenth):
        """Train the model, printing the loss every num_steps/10 steps.

        Returns:
            (model, state) — the cell and its initial-state tensor.
        """
        periods = 10
        step_per_periods = int(num_steps / periods)

        input, target, sequence = input_fn(inputs, targets, self.batch_size, shuffle=True, sequece_lenth=sequenceLenth)

        initial_state = self.model.zero_state(self.batch_size, tf.float32)

        outputs, state = tf.nn.dynamic_rnn(self.model, input, initial_state=initial_state)

        # Use only the last time step's output as the prediction.
        loss = self.loss_mean_squre(tf.reshape(outputs, [self.time_steps, self.batch_size])[-1], target)
        optimizer = tf.train.AdamOptimizer(learning_rate=learningRate)
        grads_and_vars = optimizer.compute_gradients(loss, self.model.variables)
        # BUG FIX: keep the returned op — apply_gradients only builds the
        # update op; it must be fetched in sess.run for weights to change.
        train_op = optimizer.apply_gradients(grads_and_vars)

        init_op = tf.global_variables_initializer()
        with tf.Session() as sess:
            # BUG FIX: initialize ONCE before the loop. Running init_op on
            # every step resets the weights, so no learning ever happened.
            sess.run(init_op)
            for i in range(num_steps):
                # Fetch train_op so the gradient update is actually applied.
                _, state2, current_loss = sess.run([train_op, state, loss])
                if i % step_per_periods == 0:
                    print("period " + str(int(i / step_per_periods)) + ":" + str(current_loss))
        return self.model, self.state


def processFeature(df):
    """Extract the 'vecs' column of *df* as plain nested Python lists.

    Args:
        df: DataFrame with a 'class' column (dropped/validated here) and a
            'vecs' column whose entries are sequences of embedding vectors.

    Returns:
        A list (one per row) of lists (one per time step) of lists
        (the embedding values), suitable for tensor conversion.
    """
    # Explicit axis=1 (the positional second argument is deprecated); also
    # validates that the 'class' column exists.
    df = df.drop('class', axis=1)

    # Convert every embedding vector to a plain list. (The original code
    # also built an unused intermediate Series 'aa'; removed.)
    return [[list(vec) for vec in row] for row in df["vecs"]]


def processTargets(df):
    """Return the 'class' column of *df* as a float tensor."""
    labels = df["class"].copy()
    as_floats = labels.astype(float).tolist()
    return tf.convert_to_tensor(as_floats)


if __name__ == '__main__':
    # Number of time steps each example is divided into.
    dividNumber = 30
    # some code here to modify my data to input
    #
    # it looks like this:
    # inputs before use input function : [fullLenth, charactorLenth, embeddinglenth]

    model = RnnModel(batch_size=15,
                     hidden_units=[100, 80, 80, 1],
                     time_steps=dividNumber,
                     num_features=25)
    model.train(num_steps=5000,
                learningRate=0.0001,
                input_fn=my_input_fn,
                inputs=training_examples,
                targets=training_targets,
                sequenceLenth=trainSequenceL)

错误信息如下:

追踪(最近一次通话):       在_do_call中输入文件“D:\ Anaconda3 \ envs \ tensorflow-cpu \ lib \ site-packages \ tensorflow \ python \ client \ session.py”,第1330行         return fn(* args)       在_run_fn中输入文件“D:\ Anaconda3 \ envs \ tensorflow-cpu \ lib \ site-packages \ tensorflow \ python \ client \ session.py”,第1315行         options,feed_dict,fetch_list,target_list,run_metadata)       在_call_tf_sessionrun中输入文件“D:\ Anaconda3 \ envs \ tensorflow-cpu \ lib \ site-packages \ tensorflow \ python \ client \ session.py”,第1423行         status,run_metadata)       文件“D:\ Anaconda3 \ envs \ tensorflow -cpu \ lib \ site-packages \ tensorflow \ python \ framework \ errors_impl.py”,第516行,退出         c_api.TF_GetCode(self.status.status))     tensorflow.python.framework.errors_impl.InvalidArgumentError:ConcatOp:输入的维度应匹配:shape [0] = [20,25] vs. shape [1] = [30,100]          [[节点:rnn / while / rnn / multi_rnn_cell / cell_0 / basic_lstm_cell / concat = ConcatV2 [N = 2,T = DT_FLOAT,Tidx = DT_INT32,_device =“/ job:localhost / replica:0 / task:0 / device: CPU:0“](rnn / while / TensorArrayReadV3,rnn / while / Switch_4:1,rnn / while / rnn / multi_rnn_cell / cell_3 / basic_lstm_cell / Const)]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "D:/programming/mlwords/dnn_gragh.py", line 198, in <module>
    model.train(5000, 0.0001, my_input_fn, training_examples, training_targets, sequenceLenth=trainSequenceL)
  File "D:/programming/mlwords/dnn_gragh.py", line 124, in train
    state2, current_loss, nowAccuracy = sess.run([state, loss, accuracy])
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 908, in run
    run_metadata_ptr)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1143, in _run
    feed_dict_tensor, options, run_metadata)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1324, in _do_run
    run_metadata)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1343, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [20,25] vs. shape[1] = [30,100]
     [[Node: rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](rnn/while/TensorArrayReadV3, rnn/while/Switch_4:1, rnn/while/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/Const)]]

Caused by op 'rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat', defined at:
  File "D:/programming/mlwords/dnn_gragh.py", line 198, in <module>
    model.train(5000, 0.0001, my_input_fn, training_examples, training_targets, sequenceLenth=trainSequenceL)
  File "D:/programming/mlwords/dnn_gragh.py", line 95, in train
    outputs, state = tf.nn.dynamic_rnn(self.model, input, initial_state=initial_state)#,sequence_length=sequence
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 627, in dynamic_rnn
    dtype=dtype)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 824, in _dynamic_rnn_loop
    swap_memory=swap_memory)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 3205, in while_loop
    result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2943, in BuildLoop
    pred, body, original_loop_vars, loop_vars, shape_invariants)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2880, in _BuildLoop
    body_result = body(*packed_vars_for_body)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 3181, in <lambda>
    body = lambda i, lv: (i + 1, orig_body(*lv))
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 795, in _time_step
    (output, new_state) = call_cell()
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 781, in <lambda>
    call_cell = lambda: cell(input_t, state)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 232, in __call__
    return super(RNNCell, self).__call__(inputs, state)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\layers\base.py", line 714, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 1283, in call
    cur_inp, new_state = cell(cur_inp, cur_state)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 339, in __call__
    *args, **kwargs)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\layers\base.py", line 714, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 620, in call
    array_ops.concat([inputs, h], 1), self._kernel)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1181, in concat
    return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 1101, in concat_v2
    "ConcatV2", values=values, axis=axis, name=name)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\ops.py", line 3309, in create_op
    op_def=op_def)
  File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\ops.py", line 1669, in __init__
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access

InvalidArgumentError (see above for traceback): ConcatOp : Dimensions of inputs should match: shape[0] = [20,25] vs. shape[1] = [30,100]
     [[Node: rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](rnn/while/TensorArrayReadV3, rnn/while/Switch_4:1, rnn/while/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/Const)]]

这是我用来检查输入的代码

def checkData(inputs, targets, sequencelence):
    """Sanity-check the input pipeline: each fetched batch must hold
    batch_size sentences of 30 words with 25-dim embeddings each."""
    batch_size = 20
    features, target, sequece = my_input_fn(inputs, targets, batch_size=batch_size, shuffle=True, num_epochs=None,
                                            sequece_lenth=sequencelence)
    with tf.Session() as sess:
        for _ in range(1000):
            batch_features, batch_target, batch_sequence = sess.run([features, target, sequece])
            assert len(batch_features) == batch_size
            assert all(len(sentence) == 30 for sentence in batch_features)
            assert all(len(word) == 25
                       for sentence in batch_features
                       for word in sentence)
            assert len(batch_target) == batch_size
            assert len(batch_sequence) == batch_size
            print(batch_target)
    print("OK")

1 个答案:

答案 0 :(得分:6)

错误来自对 LSTMCell.call 方法的调用。在该方法中会执行 tf.concat([inputs, h], 1),也就是在与内核(kernel)变量矩阵做 matmul 之前,把当前输入与当前隐藏状态沿第 1 维拼接起来。该错误表示这一步无法执行,因为 batch(第 0)维度不匹配——您的输入形状为 [20,25],而您的隐藏状态形状为 [30,100]。

出于某种原因,在第 32 次迭代时(或每当出现该错误时),输入没有被批处理成 30 个,而只有 20 个。这通常发生在训练数据的末尾:当训练样本的总数不能被批大小整除时,最后一个批次会比 batch_size 小。这个假设也与“当我使用较小的批处理时,似乎代码可以运行更长时间”的观察相符。