Keras model hangs at Epoch 1/x

Asked: 2018-08-12 14:55:28

Tags: python-3.x ubuntu tensorflow keras

I am trying to use SMAC3's Bayesian optimization to iteratively find the best model for a series of training and test sets. For the first iteration I get the best model and save the corresponding scores to the appropriate S3 bucket. The problem is that when the loop moves on to the second dataset, model.fit gets stuck at Epoch 1 and produces no output for a long time.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import datetime
tstart = datetime.now()
print(tstart)

import io
import boto3
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
%matplotlib inline

s3_resource = boto3.resource('s3')
s3_client = boto3.client('s3')

USE_RESAMPLING = True

def pd_read_csv_s3(path, *args, **kwargs):
    """Read a CSV from S3 (accepts "s3://bucket/key" or "bucket/key")."""
    path = path.replace("s3://", "")
    bucket, key = path.split('/', 1)
    obj = s3_client.get_object(Bucket=bucket, Key=key)
    return pd.read_csv(io.BytesIO(obj['Body'].read()), *args, **kwargs)

from io import StringIO

input_data = pd_read_csv_s3("DNN_INPUT_4core.csv")  # a dataframe of train/test data set locations and corresponding row counts, used to define a per-dataset batch size

def f(row):
    if row['row_count'] <=3000:
        val = 32 
    elif row['row_count'] <=5000:
        val = 256
    elif row['row_count'] <=10000:
        val = 512
    else:
        val = 1024
    return val

input_data['batch_size'] = input_data.apply(f, axis=1)

import smac

# Import ConfigSpace and different types of parameters
from smac.configspace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter

# Import SMAC-utilities
from smac.tae.execute_func import ExecuteTAFuncDict
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC

cs = ConfigurationSpace()

learning_rate = UniformFloatHyperparameter("learning_rate", 0.0001, 0.1, default=0.001)
dropout_rate  = UniformFloatHyperparameter("dropout_rate",  0.001, 0.25, default=0.01)
num_layers    = UniformIntegerHyperparameter("num_layers",  5, 10, default=5)
hidden_units  = UniformIntegerHyperparameter("hidden_units",  5, 30, default=10)
num_epoch     = UniformIntegerHyperparameter("num_epoch",     2, 10, default=3)

cs.add_hyperparameters([learning_rate, dropout_rate, num_layers, hidden_units, num_epoch])

# Scenario object
scenario = Scenario({"run_obj": "quality",   # we optimize quality (alternatively runtime)
                    "runcount-limit": 5,  # maximum function evaluations
                    "cs": cs,               # configuration space
                    "deterministic": "true"
                    })
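# With runcount-limit = 5 above, SMAC evaluates five configurations per
# dataset (they show up as the five "Learning rate = ..." blocks in the
# output below).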


import keras
#import smac
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, BatchNormalization
from keras.optimizers import SGD, Adam
from sklearn.metrics import roc_auc_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score

# batch_size is set per-dataset inside the main loop below

def eval_results(model, X, Y):
    # batch_size is the module-level value set per-dataset in the main loop
    predict_probs = model.predict_classes(X, batch_size=batch_size)
    return accuracy_score(Y, predict_probs)
#     return roc_auc_score(Y, predict_probs)

# to evaluate capture rate (recall on the positive class)
def eval_CR(model, X, Y):
    predict_probs = model.predict_classes(X, batch_size=batch_size)
    return recall_score(Y, predict_probs)

def model_architecture(learning_rate, num_layers, hidden_units, dropout_rate, num_epoch):
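    # num_epoch is part of the config for uniformity but is unused here;
    # epochs are applied at fit time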
    model = Sequential()
    model.add(Dense(hidden_units, activation='relu', input_dim=num_features))

    for _ in range(num_layers - 1):
        model.add(Dense(hidden_units, activation='relu'))
        model.add(Dropout(dropout_rate))
        model.add(BatchNormalization(axis=-1, momentum=0.99, 
                                    epsilon=0.001, center=True, scale=True, 
                                    beta_initializer='zeros', gamma_initializer='ones'))

    # Output head: a 2-unit softmax layer (binary classification)
    model.add(Dense(2, activation='softmax'))

    adam = Adam(lr=learning_rate)
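    # note: with a 2-unit softmax and one-hot targets, 'categorical_crossentropy'
    # is the conventional pairing; 'binary_crossentropy' here is applied
    # element-wise to the two output units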
    model.compile(loss='binary_crossentropy',
                optimizer=adam,
                metrics=['accuracy'])
    return model

def get_model_from_cfg(cfg):
    learning_rate = cfg["learning_rate"]
    dropout_rate  = cfg["dropout_rate"]
    num_layers    = cfg["num_layers"]
    hidden_units  = cfg["hidden_units"]
    num_epoch     = cfg["num_epoch"]

    print(""" Learning rate = %.5f, num_layers = %s, hidden_units = %s, dropout_rate = %s, num_epoch = %s    """
        % ( learning_rate, num_layers, hidden_units, dropout_rate,  num_epoch)         
        )
    return model_architecture(learning_rate=learning_rate,
                            num_layers=num_layers,
                            hidden_units=hidden_units,
                            dropout_rate=dropout_rate,
                            num_epoch=num_epoch)

nfold = 3
def fit_dnn_from_cfg(cfg):
    """ Evaluate DNN based on the configuration
    Parameter: cfg dictionary 
    Returns:
    --------
    A cross validated score
    """
    model = get_model_from_cfg(cfg)  # note: the same model instance is refit on every fold, so weights carry over between folds
    num_epoch = cfg["num_epoch"]
    scores = np.zeros(nfold)
    for n in range(nfold):
        idx = np.array(range(X_train.shape[0]))
        idx_valid = np.logical_and(idx>=X_train.shape[0]/nfold*n, idx<X_train.shape[0]/nfold*(n+1))
        idx_train = np.logical_not(idx_valid)
        model.fit(X_train.values[idx_train,:], Y_train_encoded[idx_train,:], 
                epochs=num_epoch, batch_size=batch_size, shuffle=True,
                callbacks = None, verbose=1)
        scores[n] = eval_results(model, X_train.values[idx_valid, :], Y_train_encoded[idx_valid, 1])
    cross_val_auc = sum(scores)/nfold
    print(" Got cross validated score = %s, each fold score is %s" % (cross_val_auc, str(scores)))
    return 1 - cross_val_auc  # SMAC minimizes, so return 1 - score to maximize the cross-validated score

for index, row in input_data.iterrows():
    print (index,row['Learn_Set'], row['Pred_Set'],row['segmentation_rule'], row['batch_size'])
    Learn_Set=row['Learn_Set']
    Pred_Set=row['Pred_Set']
    segmentation_rule=row['segmentation_rule']
    batch_size=row['batch_size']

    df=pd_read_csv_s3(f"s3://ip/{Learn_Set}")
    df1 = df.replace(np.nan, 0, regex=True)
    del df
    act_y= df1.iloc[:,10]
    predset = pd_read_csv_s3(f"s3://ip/{Pred_Set}")

    predset0 = predset.replace(np.nan, 0, regex=True)
    del predset
    predset1 = pd.get_dummies(predset0, columns=["plg", "ol_revenue_band", "final_core_tagging"])
    #del predset0
    predset2 = predset1[predset1.columns[9:73]]
    del predset1

    predsetX_pd = pd.DataFrame(predset2[predset2.columns[1:]])
    predsetY_pd = pd.DataFrame(predset2[predset2.columns[0]])
    del predset2


    enc = OneHotEncoder()
    enc.fit(predsetY_pd.values.reshape(-1, 1))
    predsetY_pd_encoded = enc.transform(predsetY_pd.values.reshape(-1, 1)).toarray()
    df2 = pd.get_dummies(df1, columns=["plg", "ol_revenue_band", "final_core_tagging"])
    df3 = df2[df2.columns[9:73]]
    X_pd = pd.DataFrame(df3[df3.columns[1:]])
    Y_pd = pd.DataFrame(df3[df3.columns[0]])
    from sklearn.model_selection import train_test_split
    X_train, X_test, Y_train, Y_test = train_test_split(X_pd, Y_pd, test_size=0.30,
                                                    random_state=42)
    from imblearn.combine import SMOTEENN
    from imblearn.over_sampling import SMOTE 
    from imblearn.under_sampling import RandomUnderSampler
    from imblearn.under_sampling import ClusterCentroids

    if USE_RESAMPLING:
        sampling_model = SMOTE()

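        # fit_sample is the pre-0.4 imbalanced-learn API; newer releases call this fit_resample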
        X_resampled_train, Y_resampled_train = sampling_model.fit_sample(X_train, Y_train)
        X_train = pd.DataFrame(X_resampled_train, columns=X_pd.columns)
        Y_train = pd.DataFrame(Y_resampled_train)

    from sklearn.preprocessing import OneHotEncoder
    enc = OneHotEncoder()
    enc.fit(Y_train.values.reshape(-1, 1))
    Y_train_encoded = enc.transform(Y_train.values.reshape(-1, 1)).toarray()
    Y_test_encoded  = enc.transform(Y_test.values.reshape(-1, 1)).toarray()

    num_samples = X_train.shape[0]
    num_features = X_train.shape[1]




    print("Optimizing! Depending on your machine, this might take a few minutes.")
    smac = SMAC(scenario=scenario, rng=np.random.RandomState(42),
                tae_runner=fit_dnn_from_cfg)

    optimized_cfg = smac.optimize()


    num_epoch = optimized_cfg["num_epoch"]

    cr_list = []
    acc_list = []
    model_dict = {}
    for i in range(30):
        model=get_model_from_cfg(optimized_cfg)
        model.fit(X_train.values, Y_train_encoded, epochs=num_epoch, batch_size=batch_size, shuffle=True, callbacks = None, verbose=1)
        cr  = eval_CR(model, X_test.values, Y_test)
        acc = eval_results(model, X_test.values, Y_test)
        cr_list.append(cr)
        acc_list.append(acc)
        model_dict['model'+str(i)] = model
        print("The current iteration number is: ", i+1)


    #Capture rate and accuracy tables
    cr_tbl = pd.DataFrame({'capture_rate':cr_list})
    acc_tbl = pd.DataFrame({'model_accuracy':acc_list})

    dif_tbl = pd.concat([cr_tbl, acc_tbl], axis=1)

    dif_tbl["difference"] = np.abs(dif_tbl['capture_rate']-dif_tbl['model_accuracy']).T

    minimum = min(dif_tbl.difference)
    minimum

    # Pick the refit whose capture rate and accuracy are closest together
    select = dif_tbl[dif_tbl.difference == minimum]

    best = 'model' + str(select.index[0])
    model = model_dict[best]

    score1 = pd.DataFrame(model.predict(predsetX_pd.values))

    #score1 = pd.read_csv(f"CA_CC1PS1_validpred11.csv", index_col=0)
    score1.columns = ['Pred0', 'Pred1']

    # Keep the first columns of the prediction set as the output base
    base = predset0[predset0.columns[0:14]]

    score_final = pd.concat([base, score1.Pred1], axis=1)
    #score_final.to_csv()
    #predsetY_pd.to_csv('CA_CC1PS1_valid_Yactual_latest.csv')

    csv_buffer = StringIO()  # fresh buffer each iteration so outputs don't accumulate across datasets
    score_final.to_csv(csv_buffer, index=False)
    s3_resource.Object('ip', f'output/{best}_BS_{batch_size}_{segmentation_rule}.csv').put(Body=csv_buffer.getvalue())
    dif_tbl.to_csv(f'{best}_BS_{batch_size}_{segmentation_rule}_cr.csv')
    #tf.keras.backend.clear_session()
    #tf.keras.backend.backend()
    #tf.reset_default_graph()
    # end of per-dataset loop; elapsed time is printed below
tend = datetime.now()
print(tend-tstart)

Output:

0 firstinput_train.csv firstinput_test.csv CAPCC3PS3 1024
Optimizing! Depending on your machine, this might take a few minutes.
/home/ubuntu/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:578: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
  y = column_or_1d(y, warn=True)
 Learning rate = 0.00100, num_layers = 5, hidden_units = 10, dropout_rate = 0.01, num_epoch = 3
Epoch 1/3
8374/8374 [==============================] - 1s 134us/step - loss: 0.7920 - acc: 0.5057
Epoch 2/3
8374/8374 [==============================] - 0s 8us/step - loss: 0.7038 - acc: 0.5807
Epoch 3/3
8374/8374 [==============================] - 0s 8us/step - loss: 0.6587 - acc: 0.6536
...
 Got cross validated score = 0.5390249165733113, each fold score is [0.30802292 0.50728445 0.80176738]

[... the remaining four SMAC configurations and the 30 refits of the best configuration all train normally; log elided ...]

Epoch 8/8
12562/12562 [==============================] - 0s 17us/step - loss: 0.4584 - acc: 0.7890
The current iteration number is:  30
1 secondinput_train.csv secondinput_test.csv MBCC0PS3 512
Optimizing! Depending on your machine, this might take a few minutes.
/home/ubuntu/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:578: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
  y = column_or_1d(y, warn=True)
 Learning rate = 0.00100, num_layers = 5, hidden_units = 10, dropout_rate = 0.01, num_epoch = 3
Epoch 1/3

Basically, it gets stuck on the second input dataset. If I restart the kernel and run again, it is able to process the second input file, but that is a tedious workaround because I have to run 900 such files. Please help me resolve this issue; thanks in advance :)
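One thing the run above does not yet do is reset the backend between iterations; the commented-out clear_session / reset_default_graph lines at the end of my loop are what I had in mind. A minimal, untested sketch of that idea, in case the hang comes from Keras/TensorFlow graph state accumulating across iterations (reset_backend is just a placeholder name):

import keras.backend as K
import tensorflow as tf

def reset_backend():
    # Drop the accumulated Keras/TF graph and session between datasets so that
    # layers and optimizers from earlier iterations don't pile up.
    K.clear_session()
    tf.reset_default_graph()  # TF 1.x API, matching the stack above

# call reset_backend() once at the end of each pass over input_data,
# right after the S3 upload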

0 Answers

No answers yet.