keras RNN ValueError: None values not supported

Time: 2017-07-25 07:09:18

Tags: python keras rnn

I am building a GAN model using a GRU.

When I train the model, I get: 'ValueError: None values not supported'.

The error is raised when the train_on_batch() function runs.

The target is y2 (the data looks like this: [[0. 1.] [0. 1.] ... [0. 1.]]).
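(For clarity, a minimal sketch of how the targets are built, the same way as in train_GAN() below; the float32 cast is just an example of the kind of dtype change I tried:)

import numpy as np

batch_size = 32
y2 = np.zeros([batch_size, 2])   # same construction as in train_GAN() below
y2[:, 1] = 1                     # every row becomes [0., 1.]
print(y2.dtype, y2.shape)        # float64 (32, 2)
y2 = y2.astype('float32')        # example dtype change; it made no difference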

I changed the data type of y2, but it did not help. Here are my code and the error message.

I adapted the code from here ->> https://github.com/kimsohyeon/KerasGAN
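For completeness, the snippet below assumes the following imports; maxlen and max_features are defined elsewhere, and make_trainable() is presumably the usual helper from the linked repository that just toggles layer.trainable (a minimal sketch):

import random
import numpy as np
from tqdm import tqdm
from keras.models import Model
from keras.layers import Input, Dense, Embedding, GRU

# maxlen (sequence length) and max_features (vocabulary size) are set elsewhere.

def make_trainable(net, val):
    # Toggle trainability of a model and all of its layers.
    net.trainable = val
    for layer in net.layers:
        layer.trainable = val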

def build_GAN():
    ######### Build Generative model ... ##########
    g_input = Input(shape=[100])
    H = Embedding(100,256)(g_input)
    H = GRU(256, dropout=0.2, recurrent_dropout=0.2)(H)
    g_V = Dense(maxlen, activation='sigmoid')(H)
    generator = Model(g_input,g_V)
    generator.compile(loss='binary_crossentropy', optimizer='adam')

    ######## Build Discriminative model ... ##########
    d_input = Input(shape=[maxlen])
    H = Embedding(max_features, 256)(d_input)
    H = GRU(256, dropout=0.2, recurrent_dropout=0.2)(H)
    d_V = Dense(2, activation='softmax')(H)
    discriminator = Model(d_input,d_V)
    discriminator.compile(loss='categorical_crossentropy', optimizer='adam')
    make_trainable(discriminator, False)

    ########### Build stacked GAN model ##############
    gan_input = Input(shape=[100])
    H = generator(gan_input)
    gan_V = discriminator(H)
    GAN = Model(gan_input, gan_V)
    GAN.compile(loss='categorical_crossentropy', optimizer='adam')

    return generator, discriminator, GAN


def train_GAN(x_train, nb_epoch, plt_frq, batch_size):
    print('Train...')
    # set up loss storage vector
    losses = {"d":[], "g":[]}

    for e in tqdm(range(nb_epoch)):
        # X : real data + fake data
        trainidx = random.sample(range(0,x_train.shape[0]), batch_size)
        review_batch = x_train[trainidx]

        noise_gen = np.random.uniform(0,1,size=[batch_size,100])
        generated_reviews = generator.predict(noise_gen)
        x = np.concatenate((review_batch, generated_reviews))

        # y : [0,1] = positive data, [1,0] = negative data
        y = np.zeros([2*batch_size,2])
        y[0:batch_size,1] = 1
        y[batch_size:,0] = 1
        make_trainable(discriminator,True)
        d_loss  = discriminator.train_on_batch(x,y)
        losses["d"].append(d_loss)

        # train Generator-Discriminator stack on input noise to non-generated output class
        noise_tr = np.random.uniform(0,1,size=[batch_size,100])
        y2 = np.zeros([batch_size,2])
        y2[:,1] = 1
        make_trainable(discriminator,False)
        g_loss = GAN.train_on_batch(noise_tr, y2)
        losses["g"].append(g_loss)
---------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-43-fc66cd2b8331> in <module>()
     24     generator, discriminator, GAN = build_GAN()
     25 
---> 26     train_GAN(xp_train, nb_epoch=100, plt_frq=25, batch_size=32)
     27     map(add, performance,one_class_performance(xp_test, xn_test))
     28 

<ipython-input-42-b8c2dbe7b1c6> in train_GAN(x_train, nb_epoch, plt_frq, batch_size)
     28         y2[:,1] = 0
     29         make_trainable(discriminator,False)
---> 30         g_loss = GAN.train_on_batch(noise_tr, y2)
     31         losses["g"].append(g_loss)
     32 

//anaconda/lib/python3.5/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1562         else:
   1563             ins = x + y + sample_weights
-> 1564         self._make_train_function()
   1565         outputs = self.train_function(ins)
   1566         if len(outputs) == 1:

//anaconda/lib/python3.5/site-packages/keras/engine/training.py in _make_train_function(self)
    935                 self._collected_trainable_weights,
    936                 self.constraints,
--> 937                 self.total_loss)
    938             updates = self.updates + training_updates
    939             # Gets loss and metrics. Updates weights at each call.

//anaconda/lib/python3.5/site-packages/keras/optimizers.py in get_updates(self, params, constraints, loss)
    418 
    419         for p, g, m, v in zip(params, grads, ms, vs):
--> 420             m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    421             v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
    422             p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

//anaconda/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)
    881     with ops.name_scope(None, op_name, [x, y]) as name:
    882       if not isinstance(y, sparse_tensor.SparseTensor):
--> 883         y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
    884       return func(x, y, name=name)
    885 

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
    649       name=name,
    650       preferred_dtype=preferred_dtype,
--> 651       as_ref=False)
    652 
    653 

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
    714 
    715         if ret is None:
--> 716           ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    717 
    718         if ret is NotImplemented:

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
    174                                          as_ref=False):
    175   _ = as_ref
--> 176   return constant(v, dtype=dtype, name=name)
    177 
    178 

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name, verify_shape)
    163   tensor_value = attr_value_pb2.AttrValue()
    164   tensor_value.tensor.CopyFrom(
--> 165       tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape))
    166   dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
    167   const_tensor = g.create_op(

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape)
    358   else:
    359     if values is None:
--> 360       raise ValueError("None values not supported.")
    361     # if dtype is provided, forces numpy array to be the type
    362     # provided if possible.

ValueError: None values not supported.

0 Answers:

There are no answers yet.