TypeError: Expected float32, got 'Ka-Ping Yee <ping@lfw.org>' of type 'str' instead

Asked: 2019-10-11 13:20:16

Tags: python keras-layer tensorflow2.0 mixture-model tensorflow-gradient

I am still working with the same code. I made a small change, and now I am getting this error:

TypeError: Expected float32 passed to parameter 'y' of op 'Equal', got 'Ka-Ping Yee <ping@lfw.org>' of type 'str' instead. Error: Expected float32, got 'Ka-Ping Yee <ping@lfw.org>' of type 'str' instead.
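As far as I can tell, this message shows up whenever a plain Python string reaches an op that expects a float tensor during graph construction; an unrelated minimal snippet fails with the same kind of error (the string content itself doesn't matter):

import tensorflow as tf

@tf.function
def bad_compare(x):
    # A str reaching op 'Equal' where a float32 tensor is expected
    # fails at trace time with the same message shape:
    return tf.equal(x, 'Ka-Ping Yee <ping@lfw.org>')

bad_compare(tf.constant(1.0))
# TypeError: Expected float32 passed to parameter 'y' of op 'Equal', ...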

Here is the relevant part of the code, where I try to implement a mixture density network that takes a one-dimensional input and returns a two-dimensional output.

import tensorflow as tf
from tensorflow import keras

def elu_plus_one_plus_epsilon(x):
    """ELU activation with a very small addition to help prevent
    NaN in loss."""
    return keras.backend.elu(x) + 1 + keras.backend.epsilon()

def mdn_model():
    inputs = tf.keras.Input(shape=(None, INPUT_DIMS))
    layer = tf.keras.layers.Dense(N_HIDDEN, activation='relu', name='baselayer')(inputs)
    layer = tf.keras.layers.Dense(N_HIDDEN, activation='relu', name='baselayer2')(layer)
    mu = tf.keras.layers.Dense((OUTPUT_DIMS * N_MIXES), activation=None, name='mean_layer')(layer)
    # print(mu.shape)
    # variance (should be greater than 0, so we exponentiate it)
    var_layer = tf.keras.layers.Dense(OUTPUT_DIMS * N_MIXES, activation=None, name='dense_var_layer')(layer)
    var = tf.keras.layers.Lambda(lambda x: tf.math.exp(x), output_shape=(OUTPUT_DIMS * N_MIXES,),
                                 name='variance_layer')(var_layer)
    # mixing coefficients should sum to 1.0
    pi = tf.keras.layers.Dense(N_MIXES, activation='softmax', name='pi_layer')(layer)
    return mu, var, pi
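One thing worth noting: mdn_model as written returns three separate tensors, while the loss below reshapes and splits a single y_pred. If the three heads were meant to come out as one concatenated tensor wrapped in an actual tf.keras.Model, I believe it would look roughly like this (a sketch reusing the same constants, not my current code):

def build_mdn_model():
    # Hypothetical variant: same layers, but the three heads are
    # concatenated into the single [mu, sigma, pi] tensor that the
    # loss function splits, and everything is wrapped in a Model.
    inputs = tf.keras.Input(shape=(INPUT_DIMS,))
    h = tf.keras.layers.Dense(N_HIDDEN, activation='relu')(inputs)
    h = tf.keras.layers.Dense(N_HIDDEN, activation='relu')(h)
    mu = tf.keras.layers.Dense(OUTPUT_DIMS * N_MIXES)(h)
    var = tf.keras.layers.Dense(OUTPUT_DIMS * N_MIXES, activation=tf.math.exp)(h)
    pi = tf.keras.layers.Dense(N_MIXES, activation='softmax')(h)
    outputs = tf.keras.layers.Concatenate(axis=-1)([mu, var, pi])
    return tf.keras.Model(inputs=inputs, outputs=outputs)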

This is how the loss function is defined:

import tensorflow_probability as tfp
tfd = tfp.distributions

def mdn_loss_func(output_dim, num_mixes, x_true, y_true):
    y_pred = mdn_model(x_true)
    print('y_pred shape is {}'.format(y_pred.shape))
    y_pred = tf.reshape(y_pred, [-1, (2 * num_mixes * output_dim) + num_mixes], name='reshape_ypreds')
    y_true = tf.reshape(y_true, [-1, output_dim], name='reshape_ytrue')
    # Split the inputs into parameters
    out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
                                                                     num_mixes * output_dim,
                                                                     num_mixes],
                                         axis=-1, name='mdn_coef_split')
    # Construct the mixture models
    cat = tfd.Categorical(logits=out_pi)
    component_splits = [output_dim] * num_mixes
    mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
    sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
    coll = [tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
            in zip(mus, sigs)]
    mixture = tfd.Mixture(cat=cat, components=coll)
    loss = mixture.log_prob(y_true)
    loss = tf.negative(loss)
    loss = tf.reduce_mean(loss)
    return loss
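The split/Mixture part of this loss can be exercised on its own with random tensors, independent of the model (num_mixes=5, output_dim=2 and a batch of 4 are arbitrary here):

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

num_mixes, output_dim, batch = 5, 2, 4
# Fake network output with the expected [mu, sigma, pi] layout.
y_pred = tf.random.normal([batch, 2 * num_mixes * output_dim + num_mixes])
out_mu, out_sigma, out_pi = tf.split(
    y_pred, [num_mixes * output_dim, num_mixes * output_dim, num_mixes], axis=-1)
cat = tfd.Categorical(logits=out_pi)
mus = tf.split(out_mu, num_mixes, axis=1)
# Scales must be positive; the real model guarantees this via the exp
# in 'variance_layer', softplus just keeps these fake values valid.
sigs = tf.split(tf.math.softplus(out_sigma), num_mixes, axis=1)
mixture = tfd.Mixture(
    cat=cat,
    components=[tfd.MultivariateNormalDiag(loc=m, scale_diag=s)
                for m, s in zip(mus, sigs)])
y_true = tf.random.normal([batch, output_dim])
print(mixture.log_prob(y_true).shape)  # expect (4,)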

I use the Adam optimizer from tf.keras:

import time
from IPython import display

mdn_optimizer = tf.keras.optimizers.Adam(1e-4)

@tf.function
def train_step(x_true, y_true, output_dim, num_mixes):
    with tf.GradientTape() as tape:
        mdn_loss = mdn_loss_func(output_dim, num_mixes, x_true, y_true)
    gradients_of_mdn = tape.gradient(mdn_loss, mdn_model.trainable_variables)
    mdn_optimizer.apply_gradients(zip(gradients_of_mdn, mdn_model.trainable_variables))

def train(dataset, output_dim, num_mixes, epochs):
    for epoch in range(epochs):
        start = time.time()
        for x_true, y_true in dataset:
            print('x_true shape is {}'.format(x_true.shape))
            print('y_true shape is {}'.format(y_true.shape))
            train_step(x_true, y_true, output_dim, num_mixes)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))

    # Generate after the final epoch
    display.clear_output(wait=True)
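For completeness, train_dataset is a batched tf.data pipeline over (x, y) pairs, built roughly like this (x_data, y_data and BATCH_SIZE are placeholders for my actual arrays and batch size):

import numpy as np
import tensorflow as tf

BATCH_SIZE = 32  # placeholder
x_data = np.random.rand(1000, 1).astype(np.float32)  # placeholder 1-D inputs
y_data = np.random.rand(1000, 2).astype(np.float32)  # placeholder 2-D targets
train_dataset = (tf.data.Dataset.from_tensor_slices((x_data, y_data))
                 .shuffle(1000)
                 .batch(BATCH_SIZE))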
The whole thing is then run in a notebook cell:

%%time
train(train_dataset, OUTPUT_DIMS, N_MIXES, EPOCHS)

0 Answers:

There are no answers yet.