After some time, I think I have finally implemented a so-called probabilistic SVM. The code I have been using is this:
import numpy as np
import random
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
import warnings
warnings.filterwarnings("ignore")
def training_banana(name):
    # Read a whitespace-separated data file into an (N, d) numpy array.
    inputs = []
    with open(name, "r") as file:
        for line in file:
            inputs.append([float(value) for value in line.split()])
    return np.array(inputs)
def define_inputs(name, name_targets):
    # Load the inputs and flatten the (N, 1) label file into a length-N vector.
    inputs = training_banana(name)
    targets = training_banana(name_targets)[:, 0]
    N = targets.shape[0]
    return inputs, targets, N
# Load and shuffle the training set.
inputs_train, targets_train, N = define_inputs('banana_train.txt', 'banana_train_label.txt')
permute = list(range(N))
random.shuffle(permute)
inputs_train = inputs_train[permute, :]
targets_train = targets_train[permute]
# Load and shuffle the test set.
inputs_test, targets_test, N = define_inputs('banana_test.txt', 'banana_test_label.txt')
permute = list(range(N))
random.shuffle(permute)
inputs_test = inputs_test[permute, :]
targets_test = targets_test[permute]
def plotting():
    # Grid-search C, kernel and gamma with cross-validation.
    param_C = [0.01, 0.1, 1, 10, 100]
    param_grid = {'C': param_C, 'kernel': ['poly', 'rbf', 'linear'],
                  'gamma': [0.1, 0.01, 0.001, 0.0001]}
    clf = GridSearchCV(SVC(class_weight='balanced'), param_grid)
    clf.fit(inputs_train, targets_train)
    print(clf.best_params_['kernel'])
    # Refit the best configuration with probability=True so that
    # predict_proba becomes available; keep class_weight consistent
    # with the grid-searched estimator.
    clf = SVC(C=clf.best_params_['C'], gamma=clf.best_params_['gamma'],
              kernel=clf.best_params_['kernel'], class_weight='balanced',
              probability=True)
    clf.fit(inputs_train, targets_train)
    # n_support_ only holds per-class support-vector counts; the support
    # vectors themselves live in support_vectors_.
    support_vectors = clf.support_vectors_
    # Evaluate class probabilities on a regular grid for the heat maps.
    xx, yy = np.meshgrid(np.linspace(-4, 4, 1000), np.linspace(-4, 4, 1000))
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    prob_mesh = clf.predict_proba(Xfull)
    # Split the first 100 test points by predicted class (labels are -1/+1).
    predicting_classes_pos_inputs = []
    predicting_classes_neg_inputs = []
    for i in range(100):
        if clf.predict([inputs_test[i]])[0] >= 0:
            predicting_classes_pos_inputs.append(inputs_test[i])  # The problem lies here
        else:
            predicting_classes_neg_inputs.append(inputs_test[i])
    predicting_classes_pos_inputs = np.array(predicting_classes_pos_inputs)
    predicting_classes_neg_inputs = np.array(predicting_classes_neg_inputs)
    # Left panel: P(class +1) with the points predicted positive;
    # right panel: P(class -1) with the points predicted negative.
    plt.subplot(1, 2, 1)
    plt.imshow(prob_mesh[:, 1].reshape((1000, 1000)),
               extent=(-4, 4, -4, 4), origin='lower')
    plt.scatter(predicting_classes_pos_inputs[:, 0], predicting_classes_pos_inputs[:, 1],
                marker='o', c='w', edgecolor='k')
    plt.subplot(1, 2, 2)
    plt.imshow(prob_mesh[:, 0].reshape((1000, 1000)),
               extent=(-4, 4, -4, 4), origin='lower')
    plt.scatter(predicting_classes_neg_inputs[:, 0], predicting_classes_neg_inputs[:, 1],
                marker='o', c='w', edgecolor='k')
    plt.show()

plotting()
I am trying to read up on the theory of how this works, but it looks like there are several different ways to implement it. So I am curious how a probabilistic SVM actually works, and whether there is good literature I could draw on, since I need some theory for a project I am currently writing (comparing SVMs and RVMs). Thanks in advance!
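From what I have pieced together so far, scikit-learn's probability=True option implements Platt scaling (Platt, 1999, "Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods"): train the SVM as usual, then fit a sigmoid P(y=1|x) = 1 / (1 + exp(A*f(x) + B)) to the decision values f(x), with A and B estimated by maximum likelihood on held-out (cross-validated) scores. Below is a minimal sketch of my understanding, not the library's internal code; it approximates Platt's Newton fit with a one-dimensional LogisticRegression on the decision scores, and the hold-out split stands in for libsvm's internal cross-validation:

from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

def platt_sketch(X, y):
    # Hold out part of the data so the sigmoid is not fitted on the same
    # points that shaped the margin (Platt/libsvm use cross-validation here).
    X_fit, X_cal, y_fit, y_cal = train_test_split(X, y, test_size=0.3)
    svm = SVC(kernel='rbf', gamma=0.1, C=1.0).fit(X_fit, y_fit)
    # Platt scaling: P(y=1 | x) = 1 / (1 + exp(A*f(x) + B)), where f(x) is
    # the SVM decision value; a 1-D logistic regression on the scores
    # learns exactly the two parameters A and B by maximum likelihood.
    scores = svm.decision_function(X_cal).reshape(-1, 1)
    sigmoid = LogisticRegression().fit(scores, y_cal)
    # The returned callable maps new points to calibrated probabilities.
    def predict_proba(X_new):
        new_scores = svm.decision_function(X_new).reshape(-1, 1)
        return sigmoid.predict_proba(new_scores)
    return predict_proba

If that reading is right, calling platt_sketch(inputs_train, targets_train) should give probabilities comparable to clf.predict_proba above. For the multi-class case, scikit-learn combines the pairwise sigmoids as in Wu, Lin and Weng (2004), "Probability Estimates for Multi-class Classification by Pairwise Coupling" (JMLR 5), so those two papers are what I am reading now, but I would welcome pointers to anything better, especially something contrasting this post-hoc calibration with the RVM's built-in posterior (Tipping, 2001).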