How do I get accurate predictions from a neural network?

Asked: 2016-06-14 17:35:29

Tags: python scikit-learn neural-network prediction

I am working on a water-quality prediction project using an artificial neural network, implemented in Python. I have finished building my prediction model, but the predictions it generates are not accurate.

What I did was collect daily data from a river for the past four and a half years, and I predict the pattern of a particular parameter by feeding in the previously recorded data. What I need to do is predict the 2015 "turbidity level" of the water by supplying the turbidity data from 2012-2014.

When I compare the output of the model I created against the real data collected in 2015, it is not accurate. Please help me solve this. I have already tried varying the hidden layer size and the Lambda value.

This is my code:

import xlrd
import numpy as np
from numpy import zeros
from scipy.optimize import minimize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import optimize

#Neural Network

class Neural_Network(object):

    def __init__(self,Lambda):        
        #Define Hyperparameters
        self.inputLayerSize = 2
        self.outputLayerSize = 1
        self.hiddenLayerSize = 10
        #Weights (parameters)
        self.W1 = np.random.randn(self.inputLayerSize,self.hiddenLayerSize)
        self.W2 = np.random.randn(self.hiddenLayerSize,self.outputLayerSize)

        #Regularization Parameter:
        self.Lambda = Lambda

    def forward(self, arrayInput):
        #Propagate inputs through the network
        self.z2 = np.dot(arrayInput, self.W1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.W2)
        yHat = self.sigmoid(self.z3) 
        return yHat

    def sigmoid(self, z):
        #Apply sigmoid activation function to scalar, vector, or matrix
        return 1/(1+np.exp(-z))

    def sigmoidPrime(self,z):
        #Gradient of sigmoid
        return np.exp(-z)/((1+np.exp(-z))**2)

    def costFunction(self, arrayInput, arrayOutput):
        #Compute the cost for a given input/output pair using the weights already stored in the class.
        self.yHat = self.forward(arrayInput)
        #Mean squared error averaged over examples, plus an L2 weight penalty:
        J = 0.5*np.sum((arrayOutput-self.yHat)**2)/arrayInput.shape[0] \
            + (self.Lambda/2)*(np.sum(self.W1**2) + np.sum(self.W2**2))
        return J

    def costFunctionPrime(self, arrayInput, arrayOutput):
        #Compute derivative with respect to W and W2 for a given X and y:
        self.yHat = self.forward(arrayInput)

        delta3 = np.multiply(-(arrayOutput-self.yHat), self.sigmoidPrime(self.z3))
        #Average over examples and add the regularization gradient so that
        #this matches costFunction above:
        dJdW2 = np.dot(self.a2.T, delta3)/arrayInput.shape[0] + self.Lambda*self.W2

        delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2)
        dJdW1 = np.dot(arrayInput.T, delta2)/arrayInput.shape[0] + self.Lambda*self.W1

        return dJdW1, dJdW2

    #Helper Functions for interacting with other classes:
    def getParams(self):
        #Get W1 and W2 unrolled into vector:
        params = np.concatenate((self.W1.ravel(), self.W2.ravel()))
        return params

    def setParams(self, params):
        #Set W1 and W2 using a single parameter vector.
        W1_start = 0
        W1_end = self.hiddenLayerSize * self.inputLayerSize
        self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize , self.hiddenLayerSize))
        W2_end = W1_end + self.hiddenLayerSize*self.outputLayerSize
        self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))

    def computeGradients(self, arrayInput, arrayOutput):
        dJdW1, dJdW2 = self.costFunctionPrime(arrayInput, arrayOutput)        
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))

    def computeNumericalGradient(self,N, X, y):
        paramsInitial = N.getParams()        
        numgrad = np.zeros(paramsInitial.shape)
        perturb = np.zeros(paramsInitial.shape)
        e = 1e-4

        for p in range(len(paramsInitial)):
            #Set perturbation vector
            perturb[p] = e
            N.setParams(paramsInitial + perturb)
            loss2 = N.costFunction(X, y)

            N.setParams(paramsInitial - perturb)
            loss1 = N.costFunction(X, y)

            #Compute Numerical Gradient
            numgrad[p] = (loss2 - loss1) / (2*e)            

            #Return the value we changed to zero:
            perturb[p] = 0

        #Return Params to original value:
        N.setParams(paramsInitial)
        return numgrad

#Trainer class    
class trainer(object):
    def __init__(self, N):
        self.N = N

    def costFunctionWrapper(self, params, arrayInput, arrayOutput):
        self.N.setParams(params)
        cost = self.N.costFunction(arrayInput, arrayOutput)
        #The analytic gradient is far cheaper than the numerical one, which
        #re-evaluates the cost twice per parameter; keep the numerical
        #version only as an occasional sanity check.
        grad = self.N.computeGradients(arrayInput, arrayOutput)
        #grad = self.N.computeNumericalGradient(self.N, arrayInput, arrayOutput)
        return cost, grad

    def callbackF(self, params):
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.arrayInput, self.arrayOutput))
        self.testJ.append(self.N.costFunction(self.TestInput, self.TestOutput))

    def train(self, arrayInput, arrayOutput,TestInput,TestOutput):
        #Make an internal variable for the callback function:
        self.arrayInput = arrayInput
        self.arrayOutput = arrayOutput

        self.TestInput = TestInput
        self.TestOutput = TestOutput

        #Make empty list to store costs:
        self.J = []
        self.testJ= []

        params0 = self.N.getParams()

        options = {'maxiter': 200, 'disp' : True}
        _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS', \
                                 args=(arrayInput, arrayOutput), options=options, callback=self.callbackF)

        self.N.setParams(_res.x)
        self.optimizationResults = _res



#Main Program   

path = r"F:\prototype\newdata\tody\turbidity\c.xlsx"
book = xlrd.open_workbook(path)

input1=[]
output=[]
testinput=[]
testoutput=[]

#training data set
first_sheet = book.sheet_by_index(1)
for col in range(first_sheet.ncols-1):                      #input feature columns
    input1.append(first_sheet.col_values(col))

for col in range(first_sheet.ncols-1, first_sheet.ncols):   #last column is the target
    output.append(first_sheet.col_values(col))

arrayInput = np.asarray(input1)
arrayInput = arrayInput.T
arrayOutput = np.asarray(output)
arrayOutput = arrayOutput.T

#testing data set
first_sheet1 = book.sheet_by_index(0)
for col in range(first_sheet1.ncols-1):
    testinput.append(first_sheet1.col_values(col))

for col in range(first_sheet1.ncols-1, first_sheet1.ncols):
    testoutput.append(first_sheet1.col_values(col))

TestInput = np.asarray(testinput)
TestInput = TestInput.T
TestOutput = np.asarray(testoutput)
TestOutput = TestOutput.T

#2016
input2016=[]
first_sheet2 = book.sheet_by_index(2)
for col in range(first_sheet2.ncols):
    input2016.append(first_sheet2.col_values(col))

Input = np.asarray(input2016)
Input = Input.T

# Scaling: normalise every set with the maxima computed from the *training*
# data; scaling each set by its own maximum would put the training, test and
# 2016 inputs on different scales.
inputMax = np.amax(arrayInput, axis=0)
outputMax = np.amax(arrayOutput, axis=0)

arrayInput = arrayInput / inputMax
arrayOutput = arrayOutput / outputMax

TestInput = TestInput / inputMax
TestOutput = TestOutput / outputMax
Input = Input / inputMax

NN = Neural_Network(Lambda=1e-14)   #effectively no regularization

T = trainer(NN)
T.train(arrayInput,arrayOutput,TestInput,TestOutput)


print(NN.costFunctionPrime(arrayInput, arrayOutput))

Output = NN.forward(Input)
print(Output)
print('----------')
#print(TestOutput)
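#To compare the predictions with real measurements in their original units,
#undo the target scaling first (outputMax comes from the training targets above):
#print(Output * outputMax)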

#plt.plot(T.J)        #uncomment to plot the training cost per iteration instead
plt.plot(Output)
plt.grid(1)
plt.xlabel('Sample (day)')
plt.ylabel('Predicted turbidity (scaled)')
plt.show()
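
#Sanity check (a minimal sketch, assuming costFunctionPrime matches the
#regularized cost above): the analytic and numerical gradients should agree
#to roughly 1e-8 relative error; a large value points to a gradient bug.
grad = NN.computeGradients(arrayInput, arrayOutput)
numgrad = NN.computeNumericalGradient(NN, arrayInput, arrayOutput)
print('gradient check relative error: %g' % (np.linalg.norm(grad - numgrad) /
                                             np.linalg.norm(grad + numgrad)))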

(Figure: the actual 2015 turbidity data plotted against the values predicted by this code.)

1 Answer:

Answer 0 (score: 1):

Some comments suggested scaling the output of the sigmoidal layer to match the true data. If you look at your predictions, you will see that with some scaling they are quite accurate. However, I advise against scaling a sigmoidal function.

A sigmoidal output is meant to be interpreted as a probability (provided certain constraints are followed), so scaling it breaks that contract and can give undefined results. What happens if you scale from 0-100 and then start receiving training targets larger than 100? (This assumes you are training an online system; otherwise the example may not be relevant.)

我会更改您的代码以使用线性输出图层。在训练网络之后,这不需要对数据进行任何操纵。另外,假设您的成本函数是最小二乘法,线性输出图层将是凸的(这会减少算法可能陷入的局部最优值)。