我有50个站点,在对一列(风向)进行一次热编码后,测量天气和污染值(每个站点6个参数),添加了8列新列,并删除了原始列。最终,我有50 * 13 = 650列和35000条记录(每个测量都是每小时进行一次,基本上是最近4年)。 将所有这些信息提供给RNN的模型,如下所示,将导致由于充分利用硬件资源而导致崩溃。我已经通过一个TimeSeriesGenerator在时间上拆分了数据,以创建128个示例的批次,每个示例24小时(因此批次大小为128,序列长度为24)。
有没有一种方法可以计算训练,验证和测试,一次给出一个测站的测量值(因此,每次有13列的批次)?我知道还不清楚,请随时询问更多信息...
# --- Train / validation / test split (chronological, no shuffling) ----------
# TimeseriesGenerator slices overlapping windows of `sequence_length` steps;
# batch_size is set to the total number of windows so a single next() call
# materializes the whole training set at once.
all_data_generator = iter(TimeseriesGenerator(data=x_train_scaled,
                                              targets=y_train_scaled,
                                              length=sequence_length,
                                              shuffle=False,
                                              batch_size=(num_train - sequence_length + 1)))
train_data_x, train_data_y = next(all_data_generator)

# Hold out the chronologically LAST `validation_split` fraction as validation
# data (shuffle=False above guarantees temporal ordering is preserved).
train_data_size = train_data_x.shape[0]
validation_data_size = int(train_data_size * validation_split)
val_data_x = train_data_x[train_data_size - validation_data_size:]
val_data_y = train_data_y[train_data_size - validation_data_size:]
train_data_x = train_data_x[0:train_data_size - validation_data_size]
train_data_y = train_data_y[0:train_data_size - validation_data_size]
train_data_size = train_data_x.shape[0]

train_generator = getBatch(x=train_data_x, y=train_data_y, batch_size=batch_size)
validation_data = (val_data_x, val_data_y)
# BUG FIX: the validation generator was previously built with `train_data_y`,
# pairing validation inputs with TRAINING targets — validation metrics were
# computed against the wrong labels. It must use `val_data_y`.
validation_generator = getBatch(x=val_data_x, y=val_data_y, batch_size=batch_size)

# Test windows are built the same way: one oversized batch, drained once.
test_generator = iter(TimeseriesGenerator(data=x_test_scaled,
                                          targets=y_test_scaled,
                                          length=sequence_length,
                                          shuffle=False,
                                          batch_size=(num_test - sequence_length + 1)))
test_data_x, test_data_y = next(test_generator)
test_generator = getBatch(test_data_x, test_data_y, batch_size=batch_size)
#### MODEL TYPE DECLARATION AND CONFIGURATION ######################################
# Human-readable names for each `model_type` selector value (for logging /
# checkpoint file naming elsewhere in the script).
code_name = {-1: "linear", 0: "ann_base_single", 1 : "ann_base_multi",
2 : "gru_single_layer", 3 : "gru_single_layer_w_dropout", 4 : "gru_multi_layer", 5 : "gru_multi_layer_2",
6 : "gru_single_layer_w_state", 7 : "gru_single_layer_w_dropout_w_state", 8 : "gru_multi_layer_w_state", 9 : "gru_multi_2_w_state",
10 : "lstm_single_layer", 11 : "lstm_single_layer_w_dropout", 12 : "lstm_multi_layer", 13 : "lstm_multi_layer_2",
14 : "lstm_single_layer_w_state", 15 : "lstm_single_layer_w_dropout_w_state", 16 : "lstm_multi_layer_w_state", 17 : "lstm_multi_2_w_state", 18 : "test_conv"}
model_type = 5
model = Sequential()
# Every architecture consumes batches shaped (batch, sequence_length, features)
# and emits `num_y_signals` sigmoid outputs (targets are scaled to [0, 1]).
# Stateful variants (6-9, 14-17) need a fixed batch size, hence batch_input_shape.
if model_type == -1:
    # Linear regression baseline over the flattened window.
    model.add(layers.Flatten(input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(num_y_signals, activation='linear'))
elif model_type == 0:
    # Single-hidden-layer MLP baseline.
    model.add(layers.Flatten(input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 1:
    # Two-hidden-layer MLP baseline.
    model.add(layers.Flatten(input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 2:
    model.add(layers.GRU(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 3:
    model.add(layers.GRU(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    # CONSISTENCY: use the `layers.` namespace like every other layer here.
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 4:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.GRU(32, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 5:
    # Three stacked GRUs with tapering widths (64 -> 32 -> 16).
    model.add(layers.GRU(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.GRU(32, activation='relu', return_sequences=True))
    model.add(layers.GRU(16, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 6:
    model.add(layers.GRU(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 7:
    model.add(layers.GRU(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 8:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.GRU(32, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 9:
    model.add(layers.GRU(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.GRU(32, activation='relu', return_sequences=True, stateful=True))
    model.add(layers.GRU(16, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 10:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 11:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 12:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.LSTM(32, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 13:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.LSTM(32, activation='relu', return_sequences=True))
    model.add(layers.LSTM(16, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 14:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 15:
    model.add(layers.LSTM(hidden_layer_size, activation='relu', batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 16:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.LSTM(32, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 17:
    model.add(layers.LSTM(64, activation='relu', return_sequences=True, batch_input_shape=(batch_size, sequence_length, x_data.shape[1]), stateful=True))
    model.add(layers.LSTM(32, activation='relu', return_sequences=True, stateful=True))
    model.add(layers.LSTM(16, activation='relu', stateful=True))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
elif model_type == 18:
    # BUG FIX: this was layers.Conv2D, which requires 4-D input
    # (batch, height, width, channels); the generators feed 3-D batches
    # (batch, sequence_length, features), so building/fitting model 18 would
    # fail. Conv1D with kernel_size=1 performs the intended per-timestep
    # feature mixing on sequence data.
    model.add(layers.Conv1D(filters=x_data.shape[1], kernel_size=1, input_shape=(sequence_length, x_data.shape[1])))
    model.add(layers.GRU(hidden_layer_size, activation='relu'))
    model.add(layers.Dense(num_y_signals, activation='sigmoid'))
# MAE loss suits the regression targets; NOTE(review): the 'accuracy' metric
# is meaningless for continuous outputs but is kept to preserve behavior.
model.compile(optimizer=Adam(), loss='mae', metrics=[metrics.mae, 'accuracy'])
model.summary()
在具有i7-4700HQ 8GB内存的ASUS N550上运行
编辑:这是模型摘要
[5 rows x 713 columns]
Station 2000003 ... Time
Parameter 0 1 2 ... H_21 H_22 H_23
count 43824.0 43824.0 43824.000000 ... 43824.000000 43824.000000 43824.000000
mean 0.0 0.0 25.085629 ... 0.041667 0.041667 0.041667
std 0.0 0.0 16.950108 ... 0.199829 0.199829 0.199829
min 0.0 0.0 1.000000 ... 0.000000 0.000000 0.000000
25% 0.0 0.0 11.000000 ... 0.000000 0.000000 0.000000
50% 0.0 0.0 21.000000 ... 0.000000 0.000000 0.000000
75% 0.0 0.0 35.000000 ... 0.000000 0.000000 0.000000
max 0.0 0.0 134.000000 ... 1.000000 1.000000 1.000000
答案(得分:0):
鉴于您只有8 GB的RAM,对于您当前的硬件,这可能将是一项艰巨的任务。
我将从尽可能多地剥离模型中开始,看看它是否可以解决这个问题,也许首先要移除两个中心GRU层。
您也可以尝试在调用 model.fit 时设置较小的批次大小(batch size),8、16 或 32 可能是不错的选择。