TensorFlow shape and concat error: Dimension 1 in both shapes must be equal, but are 4 and 5

Date: 2019-05-30 21:54:40

Tags: python tensorflow

When I stack two CNN layers in Python and then join their pooled outputs, I get the errors below at the concat step. How do I fix the error and normalize the values?

import tensorflow as tf
import numpy as np 
from tensorflow.python.ops import gen_array_ops

class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters, fc_hidden_size, l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        fc_hidden_size = 1024  # overrides the fc_hidden_size constructor argument
        self.is_training = tf.placeholder(tf.bool, name="is_training")
        initializer = tf.random_normal_initializer(stddev=0.1)
        self.initializer = initializer
        self.is_training_flag = True
        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        def flatten_reshape(variable):
            # Collapse every dimension after the batch dimension into one.
            dim = 1
            for d in variable.get_shape()[1:].as_list():
                dim *= d
            return tf.reshape(variable, shape=[-1, dim])
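        # e.g. a [batch, 3, 50, 50] input becomes [batch, 3 * 50 * 50] = [batch, 7500]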
        def _highway_layer(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu):
            """
            Highway Network (cf. http://arxiv.org/abs/1505.00387).
            t = sigmoid(Wy + b)
            z = t * g(Wy + b) + (1 - t) * y
            where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
            """
            for idx in range(num_layers):
                g = f(_linear(input_, size, scope=("highway_lin_{0}".format(idx))))
                t = tf.sigmoid(_linear(input_, size, scope=("highway_gate_{0}".format(idx))) + bias)
                output = t * g + (1. - t) * input_
                input_ = output

            return output
        def _linear(input_, output_size, scope="SimpleLinear"):
            """
            Linear map: output[k] = sum_i(Matrix[k, i] * args[i]) + Bias[k]
            Args:
                input_: a tensor or a list of 2D, batch x n, Tensors.
                output_size: int, second dimension of W[i].
                scope: VariableScope for the created subgraph; defaults to "SimpleLinear".
            Returns:
                A 2D Tensor with shape [batch x output_size] equal to
                sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
            Raises:
                ValueError: if some of the arguments has unspecified or wrong shape.
            """
            shape = input_.get_shape().as_list()
            if len(shape) != 2:
                raise ValueError("Linear is expecting 2D arguments: {0}".format(str(shape)))
            if not shape[1]:
                raise ValueError("Linear expects shape[1] of arguments: {0}".format(str(shape)))
            input_size = shape[1]

            # Now the computation.
            with tf.variable_scope(scope):
                W = tf.get_variable("W", [input_size, output_size], dtype=input_.dtype)
                b = tf.get_variable("b", [output_size], dtype=input_.dtype)

            return tf.nn.xw_plus_b(input_, W, b)
        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
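            # embedded_chars_expanded: [batch, sequence_length, embedding_size, 1]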

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.sigmoid(tf.nn.bias_add(conv, b), name="sigmoid")
                # h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pooling over the outputs
                # NOTE: h actually has shape [batch, sequence_length - filter_size + 1, 1, num_filters],
                # so this reshape folds the shortened length dimension into the batch dimension.
                h2 = tf.reshape(h, [-1, sequence_length, num_filters, 1])  # intended: [batch_size, sequence_length, num_filters, 1]

                print(h2)

                filter2 = tf.get_variable("filter2-%s" % filter_size,
                                          [filter_size, num_filters, 1, num_filters],
                                          initializer=self.initializer)
                # SAME padding keeps the spatial dims, so conv2 has shape
                # [batch, sequence_length, num_filters, num_filters]
                conv2 = tf.nn.conv2d(h2, filter2, strides=[1, 1, 1, 1], padding="SAME", name="conv2")

                b2 = tf.get_variable("b2-%s" % filter_size, [num_filters])
                h3 = tf.nn.sigmoid(tf.nn.bias_add(conv2, b2), name="sigmoid")

                pooled = tf.nn.max_pool(
                    h3,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                # x_reshaped = tf.reshape(pooled, [-1, 3])
                s = flatten_reshape(pooled)  # rank 2: [batch, filter_size * num_filters * num_filters]
                pooled_outputs.append(s)
        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        print("zzz")
        num_filters_total = num_filters * len(filter_sizes)
        self.pool = tf.concat(pooled_outputs, axis=3)
        self.pool_flat = tf.reshape(self.pool, shape=[-1, num_filters_total])
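        # NOTE: pooled_outputs holds rank-2 tensors at this point, so both
        # tf.concat(..., 3) calls above raise "Shape must be at least rank 4
        # but is rank 2"; rank-2 tensors can only be concatenated on axis 0 or 1.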

        # Fully Connected Layer
        with tf.name_scope("fc"):
            W = tf.Variable(tf.truncated_normal(shape=[num_filters_total, fc_hidden_size],
                                                stddev=0.1, dtype=tf.float32), name="W")
            b = tf.Variable(tf.constant(value=0.1, shape=[fc_hidden_size], dtype=tf.float32), name="b")
            self.fc = tf.nn.xw_plus_b(self.pool_flat, W, b)

            # Batch Normalization Layer
            self.fc_bn = tf.layers.batch_normalization(self.fc, training=self.is_training)

            # Apply nonlinearity
            self.fc_out = tf.nn.relu(self.fc_bn, name="relu")

        # Highway Layer
        with tf.name_scope("highway"):
            self.highway = _highway_layer(self.fc_out, self.fc_out.get_shape()[1], num_layers=1, bias=0)

        # Add dropout (note: applied to h_pool_flat, not to the highway output)
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")

ValueError: Dimension 1 in both shapes must be equal, but are 4 and 5. Shapes are [?,4,50] and [?,5,50]. for 'concat' (op: 'ConcatV2') with input shapes: [?,3,50,50], [?,4,50,50], [?,5,50,50], [] and with computed input tensors: input[3] = <3>.

     

ValueError: Shape must be at least rank 4 but is rank 2 for 'concat' (op: 'ConcatV2') with input shapes: [?,7500], [?,10000], [?,12500], [].
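Both errors trace back to what gets appended to pooled_outputs: before flattening, the branches are [?,3,50,50], [?,4,50,50] and [?,5,50,50] (dimension 1 differs per filter size, hence the first error), and after flatten_reshape they are rank 2, so there is no axis 3 to concatenate on (the second error). Below is a minimal sketch of one way to make the branches concatenable, assuming the usual TextCNN goal of reducing each branch to a single num_filters-wide feature vector; it would replace the pooling/flattening inside the loop and the concat after it:

    # Inside the loop: pool h3 ([batch, sequence_length, num_filters, num_filters])
    # down to [batch, 1, 1, num_filters] so every branch agrees on dims 0-2.
    pooled = tf.nn.max_pool(
        h3,
        ksize=[1, sequence_length, num_filters, 1],
        strides=[1, 1, 1, 1],
        padding="VALID",
        name="pool")
    pooled_outputs.append(pooled)  # no flatten_reshape here

    # After the loop: the axis-3 concat is now well-defined.
    num_filters_total = num_filters * len(filter_sizes)
    self.h_pool = tf.concat(pooled_outputs, 3)  # [batch, 1, 1, num_filters_total]
    self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

Alternatively, the flattened rank-2 tensors can be kept and joined along axis 1 (tf.concat(pooled_outputs, 1)); in that case the reshape width must be the sum of the flattened widths (7500 + 10000 + 12500 here), not num_filters * len(filter_sizes).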

0 Answers:

No answers yet.