Keras Multi-Head CNN Model - Shape Error

Time: 2018-09-28 06:53:16

Tags: python keras

I ran into a problem while developing a multi-head CNN with Keras. A minimal working example, including a dataset, is given below. The dataset can contain different features, each with a fixed number of samples and one label per row. Each feature should be processed by its own CNN before the heads are merged into an FFN.

Hopefully you have a solution?


ValueError: Negative dimension size caused by subtracting 2 from 1 for 'conv1d_1/convolution/Conv2D' (op: 'Conv2D') with input shapes: [?,1,1,3], [1,2,3,8].
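
My reading of the error: the op name conv1d_1/convolution/Conv2D suggests that Keras implements Conv1D via Conv2D, and the input shape [?,1,1,3] corresponds to shape=(1, sps), i.e. a single time step with three channels, so no window of length 2 fits along the time axis. A standalone snippet that, as far as I can tell, raises the same message (the reported kernel shape [1,2,3,8] implies kernel_size=2; with kernel_size=1 the MaxPooling1D(pool_size=2) fails the same way):

from keras.layers import Input, Conv1D

# shape=(1, 3): one time step with 3 channels in Keras' (steps, channels) layout
x = Input(shape=(1, 3))
# a kernel of length 2 cannot slide over a time axis of length 1 -> negative dimension error
y = Conv1D(filters=8, kernel_size=2)(x)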

Example data:

FA_1;FA_2;FA_3;FB_1;FB_2;FB_3;FC_1;FC_2;FC_3;Label
0,57;0,22;0,40;0,22;0,63;0,27;0,57;0,78;0,29;A
0,56;0,30;0,57;0,07;0,13;0,44;0,30;0,68;0,76;A
0,42;0,71;0,69;0,33;0,87;0,87;0,22;0,37;0,68;A
0,57;0,45;0,13;0,04;0,15;0,57;0,21;0,34;0,53;B
0,36;0,70;0,42;0,28;0,53;0,37;0,93;0,89;0,29;B
0,25;0,98;0,83;0,73;0,34;0,75;0,44;0,08;0,63;B
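
The file uses a semicolon separator and comma decimals, so it should be read roughly like this (a value such as 0,57 becomes the float 0.57):

import pandas as pd

# sep=';' splits the columns, decimal=',' parses "0,57" as 0.57
df = pd.read_csv('cnntest.csv', sep=';', decimal=',')
print(df.shape)  # (6, 10): 9 feature columns (FA_*, FB_*, FC_*) plus Label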

My code:

import pandas as pd
import numpy as np
from numpy import hstack
from numpy import array
from sklearn.model_selection import train_test_split
from keras.layers import Input, Conv1D, Flatten, Concatenate, Dense, MaxPooling1D, Dropout
from keras.models import Model
from keras.utils import plot_model

channels = 3 # number of features of the data
sps =  3 # number of samples per feature in the data
locClasses = 2 # number of classes which could be the result
locLayers = 2 # number of hidden layers after the cnn-part

def loadData(locFilename):
    # read the file with correct separator
    df_eval = pd.read_csv(locFilename,sep=';',decimal=',')
    # pop off labels to new group
    print(df_eval.head(2))
    label_eval=df_eval.pop("Label") 
    # convert to a numpy array;
    print(df_eval.shape)

    df_eval = df_eval.values
    labels_eval, indices_eval = np.unique(label_eval, return_index=True)
    print(labels_eval)
    # convert labels to onehots 
    labels_eval = pd.get_dummies(label_eval)                       
    # make np array
    labels_eval = labels_eval.values

    return df_eval, labels_eval

def prepareDataCNN(locDF, locLabels): 
    featurevector = list()
    print(locDF.shape)
    # slice out one (rows, sps) block of columns per feature group (FA, FB, FC)
    for index in range(0,channels*sps,sps):
        featurevector.append(locDF[:,index:index+sps])

    return featurevector,locLabels

def trainTestSplitCNN(data, label, locTS):
    dataTrainX = list()
    dataTestX = list()
    # use the same random_state for every feature block so the rows stay aligned across heads
    for index in range(0,channels):
        trainx, testx, trainLabel, testLabel = train_test_split(data[index],label,test_size=locTS,random_state=42)
        dataTrainX.append(trainx)
        dataTestX.append(testx)

    return dataTrainX, dataTestX, trainLabel, testLabel


def loadFeatureForCNN(filename, sps, locTS):
    df,label = loadData(filename)
    df,label = prepareDataCNN(df,label) 

    dataTrainX, dataTestX, trainLabel, testLabel = trainTestSplitCNN(df, label, locTS)

    return dataTrainX, dataTestX, trainLabel, testLabel

def doWork():
    history = model.fit(dataTrainX, trainLabel, verbose = 0, validation_data=[dataTestX,testLabel])
    model.save(modelsavename)


# create the model based on the input parameters 


dataTrainX, dataTestX, trainLabel, testLabel = loadFeatureForCNN('cnntest.csv',sps,0.25)

inputheads = list()
myinputs = list()

# build one Input + Conv1D head per feature block
for index in range(0,channels):
    input_a = Input(shape=(1,sps),name='Input_'+str(index))
    #input_a = Input(batch_shape=(None,sps,1),name='Input_'+str(index))
    cnn1 = Conv1D(filters=8,kernel_size=(1)) (input_a)
    cnn1 = MaxPooling1D(pool_size = 2) (cnn1)
    cnn1 = Flatten()(cnn1)
    inputheads.append(cnn1)
    myinputs.append(input_a)



merged = Concatenate() (inputheads)
dense = Dense(channels)(merged)
# optional extra hidden layers (with locLayers = 2 this loop is never entered)
for index in range (2,locLayers):
    dense = (Dropout(rate=locDropoutRate)) (dense)
    dense = (Dense(locChannels, activation=locActivation, input_shape=(locChannels,))) (dense)


predictions = Dense(locClasses, activation='softmax')(dense)


model = Model(inputs=myinputs, outputs=predictions)
model.compile(loss='categorical_crossentropy',optimizer='RMSProp',metrics=['accuracy'])

plot_model(model,'test.png')

doWork()
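
For completeness, the commented-out batch_shape=(None, sps, 1) variant is the other layout I tried; I assume the feature arrays would then need an explicit channel axis before fitting, something like:

import numpy as np

# sketch: add a trailing channel axis so each block goes from (samples, sps) to (samples, sps, 1)
dataTrainX = [np.expand_dims(x, axis=-1) for x in dataTrainX]
dataTestX = [np.expand_dims(x, axis=-1) for x in dataTestX]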

0 Answers:

No answers