ValueError: GpuCorrMM 图像和内核必须具有相同的堆栈大小

时间:2017-01-31 20:12:28

标签: python theano keras gpuimage

我的输入数据形状是[n,3,64,64]

我在Stampede上运行代码后得到了这个。

Using Theano backend.
Using gpu device 0: Tesla K20m (CNMeM is disabled, cuDNN not available)
ValueError: GpuCorrMM images and kernel must have the same stack size

Apply node that caused the error: GpuCorrMM{half, (1, 1)}(GpuContiguous.0, GpuContiguous.0)
Toposort index: 115
Inputs types: [CudaNdarrayType(float32, 4D), CudaNdarrayType(float32, 4D)]
Inputs shapes: [(32, 8, 16, 1024), (256, 512, 5, 5)]
Inputs strides: [(131072, 16384, 1024, 1), (12800, 25, 5, 1)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[GpuElemwise{Add}[(0, 0)](GpuCorrMM{half, (1, 1)}.0, GpuReshape{4}.0)]]

这段代码哪里出了问题?我该如何解决?谢谢。

我的代码:

# Generator network for a DCGAN: maps a 100-dim noise vector to a 3x64x64 image.
# NOTE(review): g_input is created but never used; the Sequential model below
# declares its own input_shape.
g_input = Input(shape=(100,))

generator = Sequential()
# Project the noise vector onto a 4x4 feature map with 1024 channels.
generator.add(Dense(1024 * 4 * 4, input_shape=(100,)))
generator.add(BatchNormalization(mode=2))  # Keras 1.x API ('mode' was removed in Keras 2)
generator.add(Activation('relu'))
# Channels-first reshape; assumes Theano 'th' ordering.
# NOTE(review): the "GpuCorrMM images and kernel must have the same stack size"
# error reported for this code usually means image_dim_ordering in
# ~/.keras/keras.json is 'tf' while these layers request 'th' -- Reshape and
# BatchNormalization have no dim_ordering argument and follow the global
# setting. TODO confirm keras.json matches the 'th' layout used here.
generator.add(Reshape([1024, 4, 4]))

# 4x4 -> 8x8, 1024 -> 512 channels.
generator.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
generator.add(Convolution2D(512, 5, 5, border_mode='same', dim_ordering='th'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))

# 8x8 -> 16x16, 512 -> 256 channels.
generator.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
generator.add(Convolution2D(256, 5, 5, border_mode='same', dim_ordering='th'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))

# 16x16 -> 32x32, 256 -> 128 channels.
generator.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
generator.add(Convolution2D(128, 5, 5, border_mode='same', dim_ordering='th'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))

# 32x32 -> 64x64, 128 -> 64 channels.
generator.add(UpSampling2D(size=(2, 2), dim_ordering='th'))
generator.add(Convolution2D(64, 5, 5, border_mode='same', dim_ordering='th'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))

# Final 3-channel image; sigmoid keeps pixel values in [0, 1].
generator.add(Convolution2D(3, 5, 5, border_mode='same', dim_ordering='th'))
generator.add(Activation('sigmoid'))

generator.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))
generator.summary()

# Discriminator: strided-convolution classifier that labels 3x64x64 images
# as real (class 1) or generated (class 0).
discriminator = Sequential()

# Each subsample=(2, 2) halves the spatial size: 64 -> 32 -> 16 -> 8 -> 4.
# NOTE(review): input_shape comes from X_train, which is defined elsewhere in
# the script; the question states it is (n, 3, 64, 64).
discriminator.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode='same', dim_ordering='th', input_shape=X_train.shape[1:]))
discriminator.add(LeakyReLU(0.2))


discriminator.add(Convolution2D(128, 5, 5, subsample=(2, 2), border_mode='same', dim_ordering='th'))
discriminator.add(LeakyReLU(0.2))


discriminator.add(Convolution2D(256, 5, 5, subsample=(2, 2), border_mode='same', dim_ordering='th'))
discriminator.add(LeakyReLU(0.2))

discriminator.add(Convolution2D(512, 5, 5, subsample=(2, 2), border_mode='same', dim_ordering='th'))
discriminator.add(LeakyReLU(0.2))

discriminator.add(Flatten())

discriminator.add(Dense(1024))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.5))

# Two-way softmax: index 0 = fake, index 1 = real (see label layout below).
discriminator.add(Dense(2, activation='softmax'))

discriminator.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))
discriminator.summary()

# GAN Model: noise -> generator -> discriminator, trained end-to-end so the
# generator learns to fool the (frozen) discriminator.
gan_input = Input(shape=(100,))
gan_output = discriminator(generator(gan_input))
gan_model = Model(gan_input, gan_output)

gan_model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5))
gan_model.summary()
print("Pre-training generator...")
# 14000 noise vectors drawn uniformly from [0, 1).
noise_gen = np.random.uniform(0, 1, size=(14000, 100))   # at (0,1) creates 10000 points
generated_images = generator.predict(noise_gen)

print('generated_images shape ----', generated_images.shape)

# Pre-train the discriminator on 14000 real + 14000 generated images.
X = np.concatenate((X_train[:14000, :, :, :], generated_images))
# One-hot labels: column 1 = real (first 14000), column 0 = fake (last 14000).
y = np.zeros([28000, 2])
y[:14000, 1] = 1
y[14000:, 0] = 1

discriminator.fit(X, y, nb_epoch=1, batch_size=128)
y_hat = discriminator.predict(X)

# Per-iteration loss history: "d" = discriminator, "g" = generator (GAN stack).
losses = {"d": [], "g": []}


def train_for_n(nb_epoch=28000, batch_size=128):
    """Alternating GAN training loop.

    Each iteration trains the discriminator on a half-real/half-generated
    mini-batch, then trains the generator (through the stacked ``gan_model``)
    to make the discriminator label its samples as real.

    Relies on module-level globals: ``X_train``, ``generator``,
    ``discriminator``, ``gan_model``, ``losses`` and ``np``.

    Args:
        nb_epoch: number of training iterations (mini-batches), not full
            passes over the dataset.
        batch_size: number of real (and of generated) samples per iteration.
    """
    for e in range(nb_epoch):

        # Sample a random mini-batch of real images. np.random.randint's
        # upper bound is exclusive, so 0 <= train_idx < X_train.shape[0]
        # (the original comment wrongly claimed an inclusive bound).
        train_idx = np.random.randint(0, X_train.shape[0], size=batch_size)
        mini_batch = X_train[train_idx]
        noise_gen = np.random.normal(0, 1, size=(batch_size, 100))
        generated_images = generator.predict(noise_gen)

        # Train discriminator: real samples -> class 1, generated -> class 0.
        X = np.concatenate((mini_batch, generated_images))
        y = np.zeros([2 * batch_size, 2])
        y[:batch_size, 1] = 1
        y[batch_size:, 0] = 1

        # NOTE(review): in Keras 1.x, toggling `trainable` after a model has
        # been compiled has no effect until the model is recompiled -- verify
        # the discriminator is really frozen during the generator step.
        discriminator.trainable = True
        for layer in discriminator.layers:
            layer.trainable = True
        d_loss = discriminator.train_on_batch(X, y)
        losses["d"].append(d_loss)

        # Train generator through the stacked model; target class 1 ("real").
        noise_tr = np.random.uniform(0, 1, size=(batch_size, 100))
        y2 = np.zeros([batch_size, 2])
        y2[:, 1] = 1

        discriminator.trainable = False
        for layer in discriminator.layers:
            layer.trainable = False
        g_loss = gan_model.train_on_batch(noise_tr, y2)
        losses["g"].append(g_loss)

        # Checkpoint every 10 iterations.
        if e % 10 == 9:
            generator.save_weights('G0_weights.h5')
            discriminator.save_weights('D0_weights.h5')
            noise = np.random.uniform(0, 1, size=(100, 100))
            generated_images = generator.predict(noise)
            # Bug fix: the original saved to an absolute macOS path
            # ('/Users/zhangguanghua/Desktop/...'), which does not exist on
            # the Stampede cluster the question says this runs on and would
            # raise IOError. Save relative to the working directory instead.
            np.save('generated_images_0.npy', generated_images)

        print(("Iteration: {0} / {1}, G-Loss: {2:.4f}".format(e, nb_epoch, float(g_loss))))


# Run adversarial training for 2000 iterations (overrides the 28000 default).
train_for_n(nb_epoch=2000, batch_size=128)

此外,有人知道输入形状 [(32, 8, 16, 1024), (256, 512, 5, 5)] 代表什么吗?我该如何解决这个问题?

谢谢

1 个答案:

答案 0(得分:1)

当我在 CPU 上运行此代码时,gan_model.summary() 的输出为:0.0 1.0;X_train 形状 --- (29404, 3, 64, 64);共 29404 个训练样本。

// True only until the first layout pass, so the starting orientation is
// recorded exactly once.
var initialOrientation = true
// Whether the view is currently taller than it is wide.
var isInPortrait = false
// Set when an orientation change is detected in viewWillLayoutSubviews;
// consumed (and cleared) in viewDidLayoutSubviews.
var orientationDidChange = false

/// Detects orientation changes before layout so the old frame values can be
/// captured in `orientationWillChange()`.
override func viewWillLayoutSubviews() {
    // Bug fix: the original called super.viewDidLayoutSubviews() here,
    // invoking the wrong lifecycle method on the superclass and skipping
    // UIKit's viewWillLayoutSubviews chain.
    super.viewWillLayoutSubviews()
    if initialOrientation {
        initialOrientation = false
        // Wider than tall means landscape on the first pass.
        isInPortrait = view.frame.width <= view.frame.height
        orientationWillChange()
    } else if view.orientationHasChanged(&isInPortrait) {
        // NOTE(review): orientationHasChanged(_:) is a project extension on
        // UIView -- presumably it updates isInPortrait and reports whether
        // the orientation flipped; confirm its contract.
        orientationWillChange()
    }
}
/// Hook invoked just before layout when the orientation flips (and once on
/// the initial pass). Flags `viewDidLayoutSubviews()` to run its
/// repositioning work.
func orientationWillChange() {
    // capture the old frame values here, storing in class variables
    orientationDidChange = true
}
/// Applies the deferred repositioning after a detected orientation change,
/// then clears the flag so the work runs only once per change.
override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    guard orientationDidChange else { return }
    // change frame for mask and reposition
    orientationDidChange = false
}