What is wrong with my Keras VAE model? The accuracy is very poor

Time: 2019-01-19 11:03:45

Tags: keras

I have a 2-D array that contains only the values 1, 0, or 2. The array has 150 rows and 124 columns, and I am trying to train a slightly modified VAE on it. But the results and the accuracy are very poor; please help.
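For reference, here is a minimal, self-contained sketch (with random data standing in for the real CSV) of the preprocessing that the script at the bottom performs: each entry of the (150, 124) array of {0, 1, 2} values is one-hot encoded, and the result is flattened into the float matrix that the VAE is actually fed.

import numpy as np
from keras.utils import to_categorical

# Stand-in for the real data: 150 samples, 124 categorical features in {0, 1, 2}.
x = np.random.randint(0, 3, size=(150, 124))

x_onehot = to_categorical(x, num_classes=3)         # shape (150, 124, 3)
x_flat = x_onehot.reshape((x_onehot.shape[0], -1))  # shape (150, 372)
print(x_flat.shape)                                 # original_dim = 372 for the VAE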

from __future__ import print_function

import numpy as np

from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import losses

class VAE:

    def __init__(self, batch_size=100, original_dim=1600, latent_dim=24, epochs=50, root="IsingMC/", epsilon=0.5):
        '''
        #Reference
        - Auto-Encoding Variational Bayes
          https://arxiv.org/abs/1312.6114
          This code is adapted from the Keras VAE tutorial available at https://blog.keras.io/building-autoencoders-in-keras.html

        Parameters
        ----------
        batch_size : int, default=100
            Size of batches for gradient descent
        original_dim : int, default=1600
            Number of input features
        latent_dim : int, default=24
            Dimensionality of the latent space
        epochs : int, default=50
            Number of epochs for training
        root : str, default="IsingMC/"
            Path prefix (not used by this class)
        epsilon : float, default=0.5
            Standard deviation of the latent sampling noise
        '''
        self.batch_size = batch_size
        self.original_dim = original_dim
        self.latent_dim = latent_dim
        self.intermediate_dim = 256
        self.epochs = epochs
        self.epsilon_std = epsilon

    def sampling(self, args):
        ''' Sampling from the latent variables using the means and log-variances'''
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.latent_dim), mean=0.,
                                  stddev=self.epsilon_std)
        return z_mean + K.exp(z_log_var / 2) * epsilon
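
    # The sampling above is the reparameterization trick from the paper cited
    # in __init__: z = z_mean + exp(z_log_var / 2) * epsilon with
    # epsilon ~ N(0, epsilon_std), which keeps z differentiable with respect
    # to the encoder outputs.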

    def build(self):
        """ Construct the encoder, the standalone generator, and the end-to-end VAE model
        """
        original_dim = self.original_dim
        latent_dim = self.latent_dim
        intermediate_dim = self.intermediate_dim

        # encoder
        self.x = Input(shape=(original_dim,))
        h = Dense(intermediate_dim, activation='relu')(self.x)
        self.z_mean = Dense(latent_dim)(h)
        self.z_log_var = Dense(latent_dim)(h)

        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(self.sampling, output_shape=(latent_dim,))([self.z_mean, self.z_log_var])

        # we instantiate these layers separately so as to reuse them later
        decoder_h = Dense(intermediate_dim, activation='relu')
        decoder_mean = Dense(original_dim, activation='sigmoid')
        h_decoded = decoder_h(z)
        x_decoded_mean = decoder_mean(h_decoded)

        # standalone generator: decoder applied to arbitrary latent vectors
        decoder_input = Input(shape=(latent_dim,))
        _h_decoded = decoder_h(decoder_input)
        _x_decoded_mean = decoder_mean(_h_decoded)

        self.generator = Model(decoder_input, _x_decoded_mean)


        # end-to-end VAE model
        self.vae = Model(self.x, x_decoded_mean)

        # encoder, from inputs to latent space
        self.encoder = Model(self.x, self.z_mean)

        # compile the end-to-end model with the custom VAE loss
        self.vae.compile(optimizer='rmsprop', loss=self.vae_loss, metrics=['acc'])

        # Prints a summary of the architecture used
        self.vae.summary()

    def vae_loss(self, x, x_decoded_mean):
        ''' Reconstruction (binary cross-entropy) plus KL divergence from the prior '''
        xent_loss = losses.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var), axis=-1)
        return xent_loss + kl_loss
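
    # Note: the Keras tutorial cited above scales the reconstruction term as
    # original_dim * binary_crossentropy(...) and sums (rather than averages)
    # the KL term, so the relative weighting of the two terms here differs
    # from that reference implementation.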

    def train(self, x_train, x_test):
        from sklearn.preprocessing import minmax_scale

        x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))  # flatten each sample
        x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
        # Scale features into the [0, 1] interval so cross-entropy is a valid reconstruction loss.
        x_train = minmax_scale(x_train)
        x_test = minmax_scale(x_test)

        self.vae.fit(x_train, x_train,
                     shuffle=True,
                     epochs=self.epochs,
                     batch_size=self.batch_size,
                     validation_data=(x_test, x_test))

    def predict_latent(self, xnew):
        # Project new inputs onto the latent space with the trained encoder.
        return self.encoder.predict(xnew)

    def generate_decoding(self, znew):
        # Decode latent vectors into newly generated samples ("fantasy particles").
        return self.generator.predict(znew)

from pandas import read_csv
import keras
import numpy as np

def columns(df, prefix):
    return [col for col in df if col.startswith(prefix)]

data = read_csv('./data0115/d1.csv')
data.fillna(2, inplace=True)  # missing entries become category 2
x_col = columns(data, 'Q2_')

x = data[x_col].values
x_train, x_test = x, x
to_ca = True
copy = x_test.copy()

if to_ca:
    # One-hot encode the {0, 1, 2} entries, (n, d) -> (n, d, 3),
    # then flatten back to 2-D for the dense VAE, (n, d, 3) -> (n, 3*d).
    x = keras.utils.to_categorical(x_train, num_classes=3)
    x = x.reshape((x.shape[0], x.shape[1] * x.shape[2]))
    x_train, x_test = x, x

v = VAE(original_dim=len(x[0]))
v.build()
v.train(x_train, x_test)  # training produces the log below
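
For completeness, a minimal usage sketch of the remaining helper methods once training has finished (the latent samples below are random draws from the prior, purely for illustration):

# Project the preprocessed inputs into the latent space via the encoder.
z = v.predict_latent(x_test)

# Decode fresh points drawn from the standard-normal prior into new samples.
z_new = np.random.normal(size=(10, v.latent_dim))
x_generated = v.generate_decoding(z_new)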

Epoch 35/50
500/500 [==============================] - 0s 24us/step - loss: 0.4306 - acc: 0.0000e+00 - val_loss: 0.4308 - val_acc: 0.0000e+00
Epoch 36/50
500/500 [==============================] - 0s 24us/step - loss: 0.4293 - acc: 0.0000e+00 - val_loss: 0.4294 - val_acc: 0.0000e+00
Epoch 37/50
500/500 [==============================] - 0s 24us/step - loss: 0.4292 - acc: 0.0000e+00 - val_loss: 0.4287 - val_acc: 0.0000e+00
Epoch 38/50
500/500 [==============================] - 0s 24us/step - loss: 0.4282 - acc: 0.0000e+00 - val_loss: 0.4285 - val_acc: 0.0000e+00
Epoch 39/50
500/500 [==============================] - 0s 24us/step - loss: 0.4262 - acc: 0.0000e+00 - val_loss: 0.4265 - val_acc: 0.0000e+00
Epoch 40/50
500/500 [==============================] - 0s 24us/step - loss: 0.4250 - acc: 0.0000e+00 - val_loss: 0.4256 - val_acc: 0.0000e+00
Epoch 41/50
500/500 [==============================] - 0s 24us/step - loss: 0.4271 - acc: 0.0000e+00 - val_loss: 0.4247 - val_acc: 0.0000e+00
Epoch 42/50
500/500 [==============================] - 0s 24us/step - loss: 0.4226 - acc: 0.0000e+00 - val_loss: 0.4252 - val_acc: 0.0000e+00
Epoch 43/50
500/500 [==============================] - 0s 25us/step - loss: 0.4214 - acc: 0.0000e+00 - val_loss: 0.4233 - val_acc: 0.0000e+00
Epoch 44/50
500/500 [==============================] - 0s 25us/step - loss: 0.4237 - acc: 0.0000e+00 - val_loss: 0.4209 - val_acc: 0.0000e+00
Epoch 45/50
500/500 [==============================] - 0s 24us/step - loss: 0.4203 - acc: 0.0000e+00 - val_loss: 0.4179 - val_acc: 0.0000e+00
Epoch 46/50
500/500 [==============================] - 0s 24us/step - loss: 0.4211 - acc: 0.0000e+00 - val_loss: 0.4209 - val_acc: 0.0000e+00
Epoch 47/50
500/500 [==============================] - 0s 25us/step - loss: 0.4220 - acc: 0.0000e+00 - val_loss: 0.4179 - val_acc: 0.0000e+00
Epoch 48/50
500/500 [==============================] - 0s 24us/step - loss: 0.4172 - acc: 0.0000e+00 - val_loss: 0.4180 - val_acc: 0.0000e+00
Epoch 49/50
500/500 [==============================] - 0s 24us/step - loss: 0.4166 - acc: 0.0000e+00 - val_loss: 0.4154 - val_acc: 0.0000e+00
Epoch 50/50
500/500 [==============================] - 0s 25us/step - loss: 0.4173 - acc: 0.0000e+00 - val_loss: 0.4176 - val_acc: 0.0000e+00

0 Answers:

No answers yet