Python: NLTK: Sentiment Analysis

Date: 2015-03-12 19:51:53

Tags: python twitter nltk sentiment-analysis

I have a whole pile of code, so I don't want to dump it all on you straight away. I have been trying to figure out what is going wrong for a week now, and I have contacted several external sources (without any reply); at this point I am simply wondering: could the problem be my training set?

For my thesis I need to classify a large batch of tweets as pos/neg/neutral. The code I wrote runs fine on a test dataset I made up myself (e.g. a training set of 15 sentences: 5 pos, 5 neg and 5 neutral; and 6 test sentences: 2 pos, 2 neg and 2 neutral, of which only 1 was misclassified).
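To give an idea, the toy experiment looked roughly like this (a minimal sketch with invented sentences and features, not my real data; the labels '1'/'-1'/'0' match the ones I use below):

from nltk.classify import NaiveBayesClassifier

# One (feature dict, label) pair per training sentence
toy_train = [
    ({'love': True, 'this': True}, '1'),   # pos
    ({'hate': True, 'this': True}, '-1'),  # neg
    ({'this': True, 'is': True, 'a': True, 'phone': True}, '0'),  # neutral
]
toy_classifier = NaiveBayesClassifier.train(toy_train)
print(toy_classifier.classify({'love': True, 'it': True}))  # expected: '1'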

Once I started running the code on my manually classified training set (1629 pos, 1411 neutral and 690 neg tweets) and on 900 test tweets, things started to go wrong. Of the 900 test tweets, the vast majority (between 700 and 800) were classified as pos, with only a handful of negative and neutral ones.
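For what it is worth, this is the kind of sanity check I have been running on the trained model (a minimal sketch; it assumes `classifier`, `word_feats_test` and `testtweets` from the code below already exist):

import collections

classifier.show_most_informative_features(20)  # which features drive the decisions

# Distribution of predicted labels over the test set; each row is [date, tweet]
label_counts = collections.Counter(
    classifier.classify(word_feats_test([row[1]])) for row in testtweets)
print(label_counts)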

Would someone please be so kind as to look through my code and help me figure out what I am doing wrong? I would really appreciate it. If you need more information, I am happy to provide it.

import csv
import random  # used to shuffle the training features below
import nltk    # nltk.bigrams / nltk.trigrams
from nltk.classify import NaiveBayesClassifier
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import SnowballStemmer


stemmer = SnowballStemmer("english", ignore_stopwords = True)
pos = []
neg = []
neutral = []
with open('C:\\...pos.csv', 'r', encoding = "utf8") as f:  #open positive training set
    reader = csv.reader(f) 
    for row in reader:
        pos.extend(row)

with open('C:\\...neg.csv', 'r', encoding = "utf8") as f: #open negative training set
    reader = csv.reader(f) 
    for row in reader:
        neg.extend(row)

with open('C:\\...neutral.csv', 'r', encoding = "utf8") as f: #open neutral training set
    reader = csv.reader(f) 
    for row in reader:
        neutral.extend(row)


def uni(doc):
    """Tokenize each tweet, keep tokens longer than 2 characters, lower-case and stem them."""
    tokens = []
    for tweet in doc:
        tokens.extend(word_tokenize(tweet))
    return [stemmer.stem(word.lower()) for word in tokens if len(word) > 2]

def word_feats_uni(doc):
    return {word: True for word in uni(doc)}

def tokenizer_ngrams(document):
    """Tokenize each sentence; returns a list of token lists."""
    return [word_tokenize(sentence) for sentence in document]

def get_bi(document):
    bigrams = []
    for sentence in tokenizer_ngrams(document):
        bigrams.extend(nltk.bigrams(sentence))
    return bigrams

def get_tri(document):
    trigrams = []
    for sentence in tokenizer_ngrams(document):
        trigrams.extend(nltk.trigrams(sentence))
    return trigrams


def word_feats_bi(doc):
    return {bigram: True for bigram in get_bi(doc)}

def word_feats_tri(doc):
    return {trigram: True for trigram in get_tri(doc)}

def word_feats_test(doc):
    feats_test = {}
    feats_test.update(word_feats_uni(doc))
    feats_test.update(word_feats_bi(doc))
    feats_test.update(word_feats_tri(doc))
    return feats_test


pos_feats = [(word_feats_uni(pos),'1')] + [(word_feats_bi(pos),'1')] + [(word_feats_tri(pos),'1')]

neg_feats = [(word_feats_uni(neg),'-1')] + [(word_feats_bi(neg),'-1')] + [(word_feats_tri(neg),'-1')]

neutral_feats = [(word_feats_uni(neutral),'0')] + [(word_feats_bi(neutral),'0')] + [(word_feats_tri(neutral),'0')]

trainfeats = pos_feats + neg_feats + neutral_feats
random.shuffle(trainfeats)
classifier = NaiveBayesClassifier.train(trainfeats)

testtweets = []
with open('C:\\...testtweets.csv', 'r', encoding = "utf8") as f: #open test set; each row is "date;tweet"
    reader = csv.reader(f, delimiter = ';')
    for row in reader:
        testtweets.append(row)



date = []
word = []
y = []

def classification(dates, sentences):
    """Classify every tweet and collect (date, tweet, label) triples in y."""
    for d, tweet in zip(dates, sentences):
        sent = classifier.classify(word_feats_test([tweet]))
        y.append((d, tweet, sent))

def result(doc):
    for row in doc:  # each row is [date, tweet]
        date.append(row[0])
        word.append(row[1])
    classification(date, word)

result(testtweets)  # classify the full test set

with open('C:\\...write.csv', 'w', newline = '', encoding = "utf8") as fp: #write classified test set to file; newline='' avoids blank rows with the csv module
    a = csv.writer(fp, delimiter = ',')
    a.writerows(y)
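One thing I have started to suspect myself: `pos_feats`, `neg_feats` and `neutral_feats` above each collapse a whole class into a single (feature dict, label) pair, so the classifier only ever sees nine training examples in total. Below is a sketch of the per-tweet alternative I have been considering (hypothetical names, untested; it reuses `word_feats_test`, the tweet lists and the label scheme from above):

def per_tweet_feats(tweets, label):
    # one (features, label) pair per tweet instead of one per class
    return [(word_feats_test([tweet]), label) for tweet in tweets]

trainfeats_v2 = (per_tweet_feats(pos, '1')
                 + per_tweet_feats(neg, '-1')
                 + per_tweet_feats(neutral, '0'))
random.shuffle(trainfeats_v2)
classifier_v2 = NaiveBayesClassifier.train(trainfeats_v2)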

0 Answers