ValueError: Dimensions must be equal, but are 2 and 1 in time2vec example

Time: 2021-02-09 02:14:31

Tags: tensorflow keras deep-learning lstm word2vec

I have 2 inputs and 4 outputs, and I want to use time2vec to predict the outputs. I used the code from https://towardsdatascience.com/time2vec-for-time-series-features-encoding-a03a4f3f937e, which works for one input and one output. But when I try to use it with 2 inputs and 4 outputs, I get an error. Here is my code:

import numpy as np
import tensorflow as tf
from keras.layers import Dense, Dropout, Activation, Flatten, LSTM, Embedding, Input, concatenate, Lambda
from sklearn.preprocessing import MinMaxScaler
from keras.callbacks import EarlyStopping
import keras
import random
import os
from sklearn.metrics import mean_absolute_error
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.optimizers import *
from tensorflow.keras import backend as K
from kerashypetune import KerasGridSearch
import matplotlib.pyplot as plt

w       = 5
ts      = 10
nt      = 10
percent = 0.1                           # assumed value; `percent` was not defined in the original snippet
ntest   = nt + int(percent*nt)

X_train = np.random.rand(90,5,2)
X_test  = np.random.rand(5,5,2)
y_train = np.random.rand(90,4)
y_test  = np.random.rand(5,4)

""" ### 定义 T2V 层 ###

class T2V(Layer):
    def __init__(self, output_dim=None, **kwargs):
        self.output_dim = output_dim
        super(T2V, self).__init__(**kwargs)

    def build(self, input_shape):
        self.W = self.add_weight(name='W', shape=(1, self.output_dim),
                                 initializer='uniform', trainable=True)
        self.P = self.add_weight(name='P', shape=(1, self.output_dim),
                                 initializer='uniform', trainable=True)
        self.w = self.add_weight(name='w', shape=(1, 1),
                                 initializer='uniform', trainable=True)
        self.p = self.add_weight(name='p', shape=(1, 1),
                                 initializer='uniform', trainable=True)
        super(T2V, self).build(input_shape)

    def call(self, x):
        original = self.w * x + self.p
        sin_trans = K.sin(K.dot(x, self.W) + self.P)
        return K.concatenate([sin_trans, original], -1)

Create generators for LSTM and T2V

sequence_length = w

def gen_sequence(id_df, seq_length, seq_cols):
    data_matrix  = id_df[seq_cols].values
    num_elements = data_matrix.shape[0]
    for start, stop in zip(range(0, num_elements-seq_length), range(seq_length, num_elements)):
        yield data_matrix[start:stop, :]

def gen_labels(id_df, seq_length, label):
    data_matrix  = id_df[label].values
    num_elements = data_matrix.shape[0]
    return data_matrix[seq_length:num_elements, :]
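
These two helpers are not actually called in the snippet below; for reference, a minimal usage sketch (assuming a pandas DataFrame with illustrative column names f1, f2 for the features and t1..t4 for the targets, none of which appear in the original post) could look like this:

import pandas as pd

df = pd.DataFrame(np.random.rand(100, 6), columns=['f1', 'f2', 't1', 't2', 't3', 't4'])  # made-up data

X_seq = np.array(list(gen_sequence(df, sequence_length, ['f1', 'f2'])))   # (95, 5, 2)
y_seq = gen_labels(df, sequence_length, ['t1', 't2', 't3', 't4'])         # (95, 4)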

Define the model structure

def set_seed_TF2(seed):
    tf.random.set_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    random.seed(seed)

def T2V_NN(param, dim):
    inp = Input(shape=(dim,2))
    x = T2V(param['t2v_dim'])(inp)
    x = LSTM(param['unit'], activation=param['act'])(x)
    x = Dense(2)(x)
    m = Model(inp, x)
    m.compile(loss='mse', optimizer=Adam(lr=param['lr']))
    return m

def NN(param, dim):
    inp = Input(shape=(dim,2))
    x = LSTM(param['unit'], activation=param['act'])(inp)
    x = Dense(2)(x)
    m = Model(inp, x)
    m.compile(loss='mse', optimizer=Adam(lr=param['lr']))
    return m

Parameter grid

param_grid = {'unit': [64,32],'t2v_dim': [128,64],'lr': [1e-2,1e-3], 'act': ['elu','relu'], 'epochs': 1,'batch_size': [512,1024]}

FIT T2V + LSTM

es = EarlyStopping(patience=5, verbose=0, min_delta=0.001, monitor='val_loss', mode='auto',
                   restore_best_weights=True)

hypermodel = lambda x: T2V_NN(param=x, dim=sequence_length)

kgs_t2v = KerasGridSearch(hypermodel, param_grid, monitor='val_loss', greater_is_better=False,
                          tuner_verbose=1)
kgs_t2v.set_seed(set_seed_TF2, seed=33)
kgs_t2v.search(X_train, y_train, validation_split=0.2, callbacks=[es], shuffle=False)

But when I run the model, I get this error:

ValueError: Dimensions must be equal, but are 2 and 1 for '{{node t2v_2/MatMul}} = MatMul[T=DT_FLOAT, 
transpose_a=false, transpose_b=false](t2v_2/Reshape, t2v_2/Reshape_1)' with input shapes: [?,2], [1,128].

Can you help me solve this problem?

1 Answer:

Answer 0 (score: 1)

You have to change the parameters inside the T2V layer and inside the network so that the shapes match. In the original layer, W and P are created with shape (1, output_dim), which only works for a single input feature; with 2 features, K.dot(x, self.W) tries to multiply [?, 2] by [1, 128], which is exactly the mismatch reported in the error. Building the weights from input_shape fixes this:

class T2V(Layer):
    
    def __init__(self, output_dim=None, **kwargs):
        self.output_dim = output_dim
        super(T2V, self).__init__(**kwargs)
    
    def build(self, input_shape):
        self.W = self.add_weight(name='W', shape=(input_shape[-1], self.output_dim), 
                                 initializer='uniform', trainable=True)
        self.P = self.add_weight(name='P', shape=(input_shape[1], self.output_dim), 
                                 initializer='uniform', trainable=True)
        self.w = self.add_weight(name='w', shape=(input_shape[1], 1), 
                                 initializer='uniform', trainable=True)
        self.p = self.add_weight(name='p', shape=(input_shape[1], 1), 
                                 initializer='uniform', trainable=True)
        super(T2V, self).build(input_shape)
    
    def call(self, x):
        original = self.w * x + self.p
        sin_trans = K.sin(K.dot(x, self.W) + self.P)
        return K.concatenate([sin_trans, original], -1)
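
With the weights built from input_shape, a quick shape check (a minimal sketch on dummy tensors, not part of the original answer) shows that 2 input features and output_dim=32 give 32 periodic channels plus the 2 linear channels per timestep:

layer = T2V(32)
out = layer(tf.random.uniform((8, 5, 2)))   # batch of 8, 5 timesteps, 2 features
print(out.shape)                            # (8, 5, 34): 32 sine channels + 2 linear channels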

Create a dummy example

n_sample = 90
timesteps = 5
feat_inp = 2
feat_out = 4

X = np.random.uniform(0,1, (n_sample, timesteps, feat_inp))
y = np.random.uniform(0,1, (n_sample, feat_out))

def T2V_NN():
    inp = Input(shape=(timesteps,feat_inp))
    x = T2V(32)(inp)
    x = LSTM(8)(x)
    x = Dense(feat_out)(x)
    m = Model(inp, x)
    m.compile(loss='mse', optimizer='adam')
    return m

model = T2V_NN()
model.fit(X,y, epochs=3)
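
After fitting, the model returns one value per target feature; a short inference sketch (with made-up data) to confirm the output shape:

X_new = np.random.uniform(0, 1, (3, timesteps, feat_inp))   # 3 new samples
preds = model.predict(X_new)
print(preds.shape)   # (3, 4)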