I have trained a neural network model to predict some target values.
I save the trained model, which reaches an accuracy of 87%. I am now trying to restore the model so that I can predict values with it.
However, I have run into a problem I cannot solve. I went through similar questions, but they did not help me solve it.
Here is the code I use to train the model:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
#import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import numpy as np
import sys
#from tkinter import Tk
from sklearn.metrics import mean_squared_error, r2_score,mean_absolute_error
from datetime import datetime
#def fooPy():
start_time = datetime.now()
# Load the dataset
data = pd.read_csv('nn_100k.csv')
x = data.iloc[:,0:7]
y = data.iloc[:,7]
y = y.values #converting it into a numpy array
#converting the target values to a column vector
y = np.reshape(y, [y.shape[0], 1])
#Releasing the data
del data
#split the data into train and test dataset
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=4)
#Transforming the input values to a normal distribution
scaler= StandardScaler()
# Fit only to the training data
scaler.fit(x_train)
# Now apply the transformations to the data:
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
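# Sketch (not part of the original script): one way to keep the exact
# training-time scaling available at restore time is to persist the fitted
# scaler, e.g. with joblib (assumes joblib is installed), instead of
# refitting it on different data in the restore script:
# import joblib
# joblib.dump(scaler, 'scaler.pkl')   # save next to the checkpoint
# ...and in the restore script: scaler = joblib.load('scaler.pkl')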
total_len = x_train.shape[0]
#Training parameters
learning_rate = 0.001
training_epochs = 1000
batch_size = 8000
display_step = 1
dropout_rate = 0.9
#Number of nodes in each hidden unit
n_hidden_1 = 275
#n_hidden_2 = 75
#n_hidden_3 = 25
#Input and output layer
n_input = x_train.shape[1]
n_output = y_train.shape[1]
print('The number of input layers: %d' %n_input)
print('The number of output layers: %d' %n_output)
# tf Graph input
x = tf.placeholder("float", [None,7],name='inputs')
y = tf.placeholder("float", [None,1],name='targets')
keep_prob = tf.placeholder(tf.float32) #For drop-out
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # NOTE: drop_out is computed but never fed forward, so keep_prob
    # currently has no effect on the output.
    drop_out = tf.nn.dropout(layer_1, keep_prob)
    # Hidden layer with RELU activation
    # layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # layer_2 = tf.nn.relu(layer_2)
    # drop_out = tf.nn.dropout(layer_2,keep_prob)
    # layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    # layer_3 = tf.nn.relu(layer_3)
    # drop_out = tf.nn.dropout(layer_2,keep_prob)
    # Output layer with sigmoid activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    out = tf.sigmoid(out_layer)
    return out
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 1), name='h1'),
    # 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 1)),
    # 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 1)),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_output], 0, 1), name='out_weight')
    # 'name': 'weights'
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 1), name='b1'),
    # 'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 1)),
    # 'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 1)),
    # 'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_output], 0, 1), name='out_bias')
    # 'name': 'biases'
}
print('Weights and biases defined')
# Construct model
pred = multilayer_perceptron(x, weights, biases)
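# Option (not part of the original script): tag the prediction op with a
# stable name so the restore script can fetch it by name after
# import_meta_graph:
# pred = tf.identity(pred, name='pred')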
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
epoch_summary = 0
training_cost = 0
saver_train=tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x = x_train[i*batch_size:(i+1)*batch_size]
            batch_y = y_train[i*batch_size:(i+1)*batch_size]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout_rate})
            # Compute average loss
            avg_cost += c / total_batch
            # sample prediction
            # label_value = batch_y
            # estimate = p
            # err = label_value-estimate
        accuracy = sess.run(cost, feed_dict={x: x_test, y: y_test})
        # saver_train.save(sess,r'C:\Users\Nara\Personal\CE_TFD\Thesis\ANN\Restart_Trial\trained_model.ckpt')
        # saver_train.export_meta_graph(r'C:\Users\Nara\Personal\CE_TFD\Thesis\ANN\Restart_Trial\trained_model.meta')
        # Display logs per epoch step
        if epoch % 50 == 0:
            training_cost = np.append(training_cost, avg_cost)
            epoch_summary = np.append(epoch_summary, epoch+1)
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1))
            print("Loss: ", '%.6f' % sess.run(cost, feed_dict={x: x_test, y: y_test}))
            print("[*]----------------------------")
    print("Optimization Finished!")
    # Test model
    predicted_vals = sess.run(pred, feed_dict={x: x_test})
    # NOTE: x_validate is not loaded anywhere in this script; it is assumed to
    # be read and scaled the same way as x_test.
    urf = sess.run(pred, feed_dict={x: x_validate})
    saver_train.save(sess, r'C:\Users\....\trained_model.ckpt')
    # NOTE: Saver.save() already writes the .meta file by default, so this
    # explicit export is redundant.
    saver_train.export_meta_graph(r'C:\Users\...\trained_model.meta')
    ## The mean squared error
    print("Mean squared error: %.6f"
          % mean_squared_error(y_test, predicted_vals))
    ## Explained variance score: 1 is perfect prediction
    print('Prediction accuracy: %.6f' % r2_score(y_test, predicted_vals))
    print('Predicted URF: %.6f' % urf)
    ## The estimated URF value
    # print('Estimated URF value: %.6f' %urf)
    # plt.plot(epoch_summary,training_cost)
    # # plt.scatter()
    # plt.show()
time_elapsed = datetime.now() - start_time
print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))
And here is the code I use to restore it:
import tensorflow as tf
#import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
#from tkinter import Tk
from datetime import datetime
start_time = datetime.now()
data2 = pd.read_csv('x_test.csv',header=None)
x_test = data2.iloc[:]
x_test = x_test.values
del data2
# Load the dataset
data = pd.read_csv('x_validate.csv',header=None)
x_validate = data.head(1)
#x_validate = x_validate.values
del data
#data_path="C:\\Users\\Nara\\Personal\\CE_TFD\\Thesis\\ANN\\Restart_Trial"
# NOTE: the scaler here is refit on x_test, so its statistics differ from the
# training-time fit on x_train (see the joblib sketch in the training script).
scaler = StandardScaler()
scaler.fit(x_test)
x_test = scaler.transform(x_test)
x_validate = scaler.transform(x_validate)
#
del x_test
##Number of nodes in each hidden unit
n_hidden_1 = 325  # NOTE: the training script above uses 275
##n_hidden_2 = 75
##n_hidden_3 = 10
##Input and output layer
n_input = 7
n_output = 1
# tf Graph input
x = tf.placeholder("float", [None,7],name='inputs')
y = tf.placeholder("float", [None,1],name='targets')
#x_validate = tf.placeholder("float", [None,7])
keep_prob = tf.placeholder(tf.float32) #For drop-out
#
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    drop_out = tf.nn.dropout(layer_1, keep_prob)
    #
    ## # Hidden layer with RELU activation
    ## layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    ## layer_2 = tf.nn.relu(layer_2)
    ## drop_out = tf.nn.dropout(layer_2,keep_prob)
    ##
    ## layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    ## layer_3 = tf.nn.relu(layer_3)
    ## drop_out = tf.nn.dropout(layer_2,keep_prob)
    ##
    # Output layer with sigmoid activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    out = tf.sigmoid(out_layer)
    return out
#
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 1), name='h1'),
    ## 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 1)),
    ## 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 1)),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_output], 0, 1), name='out_weight')
}
#
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 1), name='b1'),
    ## 'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 1)),
    ## 'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 1)),
    ## 'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_output], 0, 1), name='out_bias')
}
##
##
pred = multilayer_perceptron(x,weights,biases)
#saver_train=tf.train.Saver({"weights": weights,"biases": biases})
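# NOTE: tf.reset_default_graph() below replaces the default graph, so the
# `x`, `keep_prob`, and `pred` built above still point into the old graph;
# feeding them to a session created on the new graph raises the
# "Cannot interpret feed_dict key as Tensor" error.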
tf.reset_default_graph()
#saver = tf.train.import_meta_graph('C:\\Users\\Nara\\Personal\\CE_TFD\\Thesis\\ANN\\Restart_Trial\\trained_model.ckpt.meta')
#graph = tf.get()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.import_meta_graph('C:\\Users\\Nara\\Personal\\CE_TFD\\Thesis\\ANN\\Restart_Trial\\trained_model.ckpt.meta')
    graph = sess.graph
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    print(sess.run(['h1:0']))  # Checking if the weights are restored
    urf = sess.run(pred, feed_dict={x: x_validate})
    print('Estimated URF value: %.6f' % urf)
time_elapsed = datetime.now() - start_time
print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))
I am getting this error: "Cannot interpret feed_dict key as Tensor: ' + e.args[0])".
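For reference, here is a minimal sketch of the restore pattern I am aiming for (my assumptions: TensorFlow 1.x, the checkpoint files sit in the current directory, x_validate is prepared as above, and the output op was tagged with tf.identity(pred, name='pred') before saving, as suggested in the training script). The graph is imported from the .meta file and the tensors are looked up by name, instead of being rebuilt locally and then discarded by reset_default_graph:

import tensorflow as tf

tf.reset_default_graph()  # start from a clean graph *before* importing

with tf.Session() as sess:
    # Rebuild the saved graph structure from the .meta file
    saver = tf.train.import_meta_graph('trained_model.ckpt.meta')
    # Load the trained variable values into this session
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    graph = tf.get_default_graph()
    # Fetch tensors by the names they were given at training time
    x = graph.get_tensor_by_name('inputs:0')
    pred = graph.get_tensor_by_name('pred:0')  # assumes tf.identity(pred, name='pred')
    # If the graph applies dropout at inference, the keep_prob placeholder
    # must also be named, fetched the same way, and fed (e.g. with 1.0).
    urf = sess.run(pred, feed_dict={x: x_validate})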