如何在情感分析中添加混淆矩阵和k折交叉验证(k = 10)

时间:2019-05-10 06:28:31

标签: python scikit-learn cross-validation sentiment-analysis confusion-matrix

我想使用k折交叉验证(k = 10)和混淆矩阵来评估模型,但是我很困惑。 数据集:https://github.com/fadholifh/dats/blob/master/cpas.txt

使用Python 3.7

import sklearn.metrics
import sen
import csv
import os
import re
import nltk
import scipy
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.externals import joblib
from sklearn.pipeline import Pipeline
from sklearn import model_selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
# Build an Indonesian stemmer via the Sastrawi factory (Sastrawi is an
# Indonesian stemming / stop-word library).
factorys = StemmerFactory()
stemmer = factorys.create_stemmer()


# NOTE(review): main() is never defined anywhere in this snippet -- running
# the script as-is raises NameError. The question omits the program body.
if __name__ == "__main__":
    main()

期望的结果是混淆矩阵,并且对于k折交叉验证,每一折给出F1得分、精确率和召回率

1 个答案:

答案 0 :(得分:0)

# Load the dataset: tab-separated, no header row.
# Column 0 holds the label, column 1 holds the raw text.
# NOTE(review): assumes `import pandas as pd` exists elsewhere -- not shown here.
df = pd.read_csv("cpas.txt", header=None, delimiter="\t")
X = df[1].values
y = df[0].values

# NOTE(review): `stopwords` and `PorterStemmer` come from NLTK and are not
# imported in this snippet; English stop words / Porter stemming also look
# inconsistent with the Indonesian (Sastrawi) setup in the question -- confirm.
stop_words = stopwords.words('english')
stemmer = PorterStemmer()

def clean_text(text, stop_words, stemmer):
    """Tokenize *text*, discard stop words and purely numeric tokens,
    stem what remains, and return the tokens re-joined with spaces."""
    kept = []
    for token in word_tokenize(text):
        # Skip stop words and pure numbers; stem everything else.
        if token in stop_words or token.isnumeric():
            continue
        kept.append(stemmer.stem(token))
    return " ".join(kept)

# Clean every document up front so each fold reuses the same preprocessed text.
X = np.array([clean_text(text, stop_words, stemmer) for text in X])

# 10-fold cross-validation, as the question explicitly asks for k = 10
# (the original answer used 3 folds; k is a single constant here).
kfold = KFold(10, shuffle=True, random_state=33)
for fold, (train_idx, test_idx) in enumerate(kfold.split(X), start=1):
    X_train, y_train = X[train_idx], y[train_idx]
    X_test, y_test = X[test_idx], y[test_idx]

    # Fit the vectorizer on the training fold only, so test-fold
    # vocabulary/IDF statistics never leak into training.
    vectorizer = TfidfVectorizer()
    X_train = vectorizer.fit_transform(X_train)
    X_test = vectorizer.transform(X_test)

    model = LinearSVC()
    model.fit(X_train, y_train)

    # Predict once and reuse the result for both reports.
    y_pred = model.predict(X_test)
    print ("Fold : {0}".format(fold))
    # The question explicitly asks for a confusion matrix per fold.
    print (confusion_matrix(y_test, y_pred))
    # Per-class precision, recall and F1 score for this fold.
    print (classification_report(y_test, y_pred))

使用交叉验证的原因是当数据较少时进行参数调整。可以使用CV进行网格搜索。

# --- Hyper-parameter tuning with a cross-validated grid search ---
# Load the dataset: tab-separated, no header; column 0 = label, column 1 = text.
df = pd.read_csv("cpas.txt", header=None, delimiter="\t")
X = df[1].values
labels = df[0].values

# Preprocess the raw text, then shuffle texts and labels with ONE shared
# permutation so they stay aligned.
text = np.array([clean_text(text, stop_words, stemmer) for text in X])
idx = np.arange(len(text))
np.random.shuffle(idx)

text = text[idx]
labels = labels[idx]

pipeline = Pipeline([
        ('vectorizer', TfidfVectorizer()),
        ('svm', LinearSVC())])

# Grid of vectorizer settings to search over.
params = {
    'vectorizer__ngram_range' : [(1,1),(1,2),(2,2)],
    'vectorizer__lowercase' : [True, False],
    'vectorizer__norm' : ['l1','l2']}

model = GridSearchCV(pipeline, params, cv=3, verbose=1)
# Bug fix: the original called model.fit(text, y), but the labels variable in
# this snippet is named `labels` -- `y` is undefined here and raised NameError.
model.fit(text, labels)