I want to use scikit-learn's SVR module to do time-series forecasting of future events. Here is the source code I am trying to use:
import csv
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
plt.switch_backend('newbackend')
seq_num=[]
win=[]
def get_data(filename):
    with open(filename, 'r') as csvfile:
        csvFileReader = csv.reader(csvfile)
        next(csvFileReader) # skipping column names
        for row in csvFileReader:
            seq_num.append(int(row[0]))
            win.append(int(row[6]))
    return
def predict_win(X, y, x):
    win = np.reshape(X,(len(X), 1))
    svr_lin = SVR(kernel= 'linear', C= 1e3)
    svr_poly = SVR(kernel= 'poly', C= 1e3, degree= 2)
    svr_rbf = SVR(kernel= 'rbf', C= 1e3, gamma= 0.1)
    svr_rbf.fit(X, y)
    svr_lin.fit(X, y)
    svr_poly.fit(X, y)
    plt.scatter(X, y, color= 'black', label= 'Data')
    plt.plot(y, svr_rbf.predict(X), color= 'red', label= 'RBF model')
    plt.plot(y,svr_lin.predict(X), color= 'green', label= 'Linear model')
    plt.plot(y,svr_poly.predict(X), color= 'blue', label= 'Polynomial model')
    plt.xlabel('X, other features')
    plt.ylabel('win')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()
    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
get_data('net_data.csv')
predicted_win = predict_win(X, y, 29)
My dataset is very large, so a portion of my csv dataset is included at the end. I am interested in column 7. I want to predict when the value in column 7 will increase and when it will decrease. Is it possible to look only at column 7 and do time-series forecasting on it? Any help with this would be greatly appreciated. Thanks!
0.007804347,10.0.0.11:49438,10.0.12.12:5001,32,3796291040,3796277984,10,2147483647,28960,3034,29312
0.007856739,10.0.0.11:49438,10.0.12.12:5001,32,3796293936,3796278008,11,2147483647,29056,2999,29312
0.010605189,10.0.0.11:49438,10.0.12.12:5001,32,3796320000,3796291040,20,2147483647,55040,2969,29312
0.010850907,10.0.0.11:49438,10.0.12.12:5001,32,3796348960,3796305520,30,2147483647,84096,2946,29312
0.013598458,10.0.0.11:49438,10.0.12.12:5001,32,3796377920,3796320000,40,2147483647,113024,2951,29312
0.01368011,10.0.0.11:49438,10.0.12.12:5001,32,3796434392,3796348960,60,2147483647,170880,2956,29312
0.015104265,10.0.0.11:49438,10.0.12.12:5001,32,3796434392,3796363440,70,2147483647,199936,2940,29312
0.016406964,10.0.0.11:49438,10.0.12.12:5001,32,3796490864,3796377920,80,2147483647,220160,2943,29312
0.016465876,10.0.0.11:49438,10.0.12.12:5001,32,3796537200,3796432944,81,80,330240,2925,29312
0.018355321,10.0.0.11:49438,10.0.12.12:5001,32,3796547336,3796434392,81,80,333056,2914,29312
0.020171945,10.0.0.11:49438,10.0.12.12:5001,32,3796603808,3796490864,83,80,382336,2956,29312
0.237314523,10.0.0.11:49438,10.0.12.12:5001,32,3810417728,3809658976,529,396,1775360,7109,29312
0.237409075,10.0.0.11:49438,10.0.12.12:5001,44,3810417728,3809700968,530,397,1859328,7381,29312
0.237486647,10.0.0.11:49438,10.0.12.12:5001,44,3810417728,3809700968,371,371,1960704,7365,29312
0.237807596,10.0.0.11:49438,10.0.12.12:5001,44,3810417728,3809700968,371,371,1980928,7362,29312
0.237989588,10.0.0.11:49438,10.0.12.12:5001,44,3810417728,3809700968,371,371,1989632,7400,29312
0.259123971,10.0.0.11:49438,10.0.12.12:5001,32,3811590608,3811251776,261,260,2267648,5885,29312
0.259174008,10.0.0.11:49438,10.0.12.12:5001,32,3811655768,3811289424,261,260,2267648,5918,29312
0.262546461,10.0.0.11:49438,10.0.12.12:5001,32,3811720928,3811354584,261,260,2267648,5823,29312
Answer (score: 2):
OK, there is one problem with the svm function below: the second line, win = ..., is unused and will cause an error. Remove it.
def predict_win(X, y, x):
    win = np.reshape(X,(len(X), 1)) # <----This line
    svr_lin = SVR(kernel= 'linear', C= 1e3)
    svr_poly = SVR(kernel= 'poly', C= 1e3, degree= 2)
    svr_rbf = SVR(kernel= 'rbf', C= 1e3, gamma= 0.1)
    svr_rbf.fit(X, y)
    svr_lin.fit(X, y)
    svr_poly.fit(X, y)
    plt.scatter(X, y, color= 'black', label= 'Data')
    plt.plot(y, svr_rbf.predict(X), color= 'red', label= 'RBF model')
    plt.plot(y,svr_lin.predict(X), color= 'green', label= 'Linear model')
    plt.plot(y,svr_poly.predict(X), color= 'blue', label= 'Polynomial model')
    plt.xlabel('X, other features')
    plt.ylabel('win')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()
    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
Secondly, I don't see why there is a whole function just for reading the csv. Skip it and use pandas instead. Here is a working example:
from sklearn import svm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def predict_win(X,y,x):
    svr_lin = svm.SVR(kernel='linear',C=1e3)
    svr_poly = svm.SVR(kernel='poly',C=1e3, degree=2)
    svr_rbf = svm.SVR(kernel='rbf',C=1e3,gamma=0.1)
    svr_rbf.fit(X,y)
    svr_lin.fit(X,y)
    svr_poly.fit(X,y)
    plt.plot(y,svr_rbf.predict(X),color='red',label='RBF model')
    plt.plot(y,svr_lin.predict(X),color='green',label='Linear model')
    plt.plot(y,svr_poly.predict(X),color='blue', label='Polynomial model')
    plt.xlabel('X, other features')
    plt.ylabel('win')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()
    return [svr_rbf.predict(x)[0],svr_lin.predict(x)[0],svr_poly.predict(x)[0]]
df = pd.read_csv('data.csv')
data_np_array = df.values                      # dataframe -> numpy array
y = np.ndarray.copy(data_np_array[:,6])        # column 7 (index 6) is the regression target
Xleft = np.ndarray.copy(data_np_array[:,:6])   # columns before the target
Xright = np.ndarray.copy(data_np_array[:,7:])  # columns after the target
X = np.hstack((Xleft,Xright))                  # training features without the target column
x0 = np.ndarray.copy(X[0,:])                   # first row of features, used as the query point
xp = predict_win(X,y,x0)
# ratio between each prediction and the actual column-7 value of the first row
percent_off = [min(data_np_array[0,6],prediction)/max(data_np_array[0,6],prediction) for prediction in xp]
The intermediate steps of cleaning the imported data, converting it from a dataframe to a numpy array, copying out column 7 as the regression target, deleting it from the training data, and rebuilding the new feature array must be done before fitting the SVR:
df = pd.read_csv('data.csv')
data_np_array = df.values
y = np.ndarray.copy(data_np_array[:,6])
Xleft = np.ndarray.copy(data_np_array[:,:6])
Xright = np.ndarray.copy(data_np_array[:,7:])
X = np.hstack((Xleft,Xright))
Let me know whether this works. I only took a few rows from your data table above.
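One more thought on the part of the question about using only column 7: you can treat it as a univariate series and regress each value on the few values that came before it. Below is a minimal, untested sketch of that idea; the file name data.csv, the window of 3 lags, and the RBF parameters are just assumptions carried over from the code above, not anything verified against your full data.
import numpy as np
import pandas as pd
from sklearn.svm import SVR

# read the same csv and keep only column 7 (index 6) as a univariate series
df = pd.read_csv('data.csv')
series = df.values[:,6].astype(float)

# build lagged features: predict series[t] from the previous `lags` values
lags = 3  # assumed window size
X = np.column_stack([series[i:len(series)-lags+i] for i in range(lags)])
y = series[lags:]

# fit an RBF SVR on the lagged windows
model = SVR(kernel='rbf', C=1e3, gamma=0.1)
model.fit(X, y)

# predict the next value from the most recent window of column 7
next_value = model.predict(series[-lags:].reshape(1, -1))[0]
print('last observed:', series[-1], 'predicted next:', next_value)
If the predicted next value is above the last observed one, that suggests column 7 is about to increase; if it is below, a decrease.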