I want to build a regression model with a neural network that uses data points 1 to 59 to predict the 60th. Each data point has 5 features. As with a classic image-recognition network, the 59x5 window is flattened into a single 295-dimensional vector and fed to the network that way. But I don't know why training doesn't work. Is it not possible to do regression instead of classification with a neural network?
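In other words, each training example is one 59x5 window flattened into a 295-dimensional vector, and the row at step 60 is the 5-dimensional regression target. A minimal sketch of that windowing idea (using a random placeholder array instead of my CSV) is:

import numpy as np

# toy stand-in for the real CSV: 200 timesteps, 5 features per step
data = np.random.rand(200, 5)

window = 59                  # steps 1..59 (indices 0..58) form the input window
x = data[0:window, :]        # shape (59, 5)
y = data[window, :]          # the 60th row is the target, shape (5,)

x_flat = x.reshape(-1)       # flattened to a 295-dimensional input vector
print(x_flat.shape, y.shape) # (295,) (5,)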
Here is the complete code:
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F

data = np.loadtxt('Apr-May_mod.csv', delimiter=",")

# build 100 random training windows of 59 rows each, with the row 60 steps later as the target
X = []
Y = []
for i in range(0, 100):
    start_point = random.randrange(1, len(data) - 60)
    X.append(data[start_point:start_point + 59, :])
    Y.append(data[start_point + 60, :])

X = torch.tensor(X, dtype=torch.float)
Y = torch.tensor(Y, dtype=torch.float)

# scale the price columns (0-3) and the volume column (4) separately
X_Pmax = torch.max(X[:, :, 0:4])
X_Vmax = torch.max(X[:, :, 4:5])
X = torch.cat([torch.div(X[:, :, 0:4], X_Pmax), torch.div(X[:, :, 4:5], X_Vmax)], dim=2)
Y_Pmax = torch.max(Y[:, 0:4])
Y_Vmax = torch.max(Y[:, 4:5])
Y = torch.cat([torch.div(Y[:, 0:4], Y_Pmax), torch.div(Y[:, 4:5], Y_Vmax)], dim=1)

# flatten each 59x5 window into a 295-dimensional vector
X = X.view([100, -1])

class NeuralNetwork(nn.Module):
    def __init__(self):
        self.inputSize = 295
        self.outputSize = 5
        self.hiddenSize = 500
        self.w1 = torch.rand(self.inputSize, self.hiddenSize, requires_grad=True)
        self.w2 = torch.rand(self.hiddenSize, self.outputSize, requires_grad=True)
        self.learning_rate = 0.001

    def forward(self, X):
        self.z1 = torch.matmul(X, self.w1)
        self.z2 = torch.sigmoid(self.z1)
        self.z3 = torch.matmul(self.z2, self.w2)
        out = torch.sigmoid(self.z3)
        return out

    def train(self, X, Y):
        out = self.forward(X)
        error = ((Y - out) ** 2).mean()
        error.backward()
        self.w1.data += self.learning_rate * self.w1.grad
        self.w2.data += self.learning_rate * self.w2.grad

    def saveWeights(self, model):
        torch.save(model, "NeuralNetwork")

    def predict(self):
        print("Input Data: X[3]")
        print("Predicted: ", str(self.forward(X[3])))

NN = NeuralNetwork()
trainIdx = 1000
for idx in range(trainIdx):
    if idx % 100 == 0:
        print("#" + str(idx) + " Loss: " + str(torch.mean((Y - NN.forward(X)) ** 2).detach().item()))
    NN.train(X, Y)
NN.saveWeights(NN)
NN.predict()
This is the output:
#0 Loss: 0.14510075747966766
#100 Loss: 0.14510075747966766
#200 Loss: 0.14510075747966766
#300 Loss: 0.14510075747966766
#400 Loss: 0.14510075747966766
#500 Loss: 0.14510075747966766
#600 Loss: 0.14510075747966766
#700 Loss: 0.14510075747966766
#800 Loss: 0.14510075747966766
#900 Loss: 0.14510075747966766
Input Data: X[3]
Predicted: tensor([1., 1., 1., 1., 1.], grad_fn=<SigmoidBackward>)
This is the file I am using, Apr-May_mod.csv:
58739.46,58796.25,58720.82,58795.74,56.541494
58795.74,58858.21,58787.48,58815.42,45.844834
58815.42,58963.91,58808.99,58963.41,80.257514
58953.83,59080.26,58952.34,59063.13,107.880493
59063.13,59093,58937.3,59061.14,68.599352
59071.84,59083,58977.78,58993.04,38.179872
58993.04,59080,58985.46,59043.88,29.342092
59043.88,59088.95,59007,59007.79,26.401287
59007.79,59071.02,58997.24,59047.38,44.14176
59047.38,59070.88,59030,59030.01,29.801803
59030,59065.61,59030,59060.37,19.928143
59060.38,59068.05,59037.86,59043.86,17.594566
59045.86,59052.55,59029.95,59049.48,21.902506
59049.49,59049.49,58967.44,58994.47,32.587782
58995.12,59007.75,58965,58965,25.056395
58965.01,58966.02,58907.76,58956.39,25.896267
58956.39,58957.68,58895.01,58900,25.493498
58900,58958.94,58877.55,58955.48,27.695467
58955.48,59027.09,58951.55,58994.19,19.519458
58994.2,59022.43,58852.94,58883.57,79.41166
58883.57,58929.89,58862.21,58926.21,17.17355
58926.22,58960.93,58926.21,58959.07,18.647385
...
(about 40,000 lines in total)