我正在尝试使用python中的nltk将推文标记为正面或负面。 我有3个文件“train_posi_tweets.txt”包含4000条正面推文“train_nega_tweets.txt”,其中包含8000条负面推文,“unlabeled_tweetss.txt”包含51647条推文,我需要标注...还有一条推文是西班牙语
参考GitHub上的victorneo我现在有了这个代码,但它没有用,任何人都可以帮我这个吗?我在这一行得到一个错误“for(words,sentiment)pos_tweets + neg_tweets:太多的值了解包例外“
# -*- coding: utf-8 -*-
"""
Created on Fri May 16 16:34:46 2014
@author: shyam
"""
import nltk
import json
from nltk.classify.naivebayes import NaiveBayesClassifier
import re
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
def read_tweets(fname, t_type):
tweets = []
f = open(fname, 'r')
for line in f.readlines():
tweet = json.loads(line)
text = tweet['text'].strip().encode('ascii', errors='ignore')
text = re.sub(r"\n", " ", text) # remove newlines from text
tweets.append(text)
f.close()
return tweets
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
def classify_tweet(tweet):
return \
classifier.classify(extract_features(nltk.word_tokenize(tweet)))
# read in postive and negative training tweets
pos_tweets = read_tweets('train_posi_tweets.txt', 'positive')
neg_tweets = read_tweets('train_nega_tweets.txt', 'negative')
# filter away words that are less than 3 letters to form the training data
tweets = []
for (words, sentiment) in pos_tweets + neg_tweets:
words_filtered = [e.lower() for e in words.split() if len(e) >= 3]
tweets.append((words_filtered, sentiment))
# extract the word features out from the training data
word_features = get_word_features(\
get_words_in_tweets(tweets))
# get the training set and train the Naive Bayes Classifier
training_set = nltk.classify.util.apply_features(extract_features, tweets)
classifier = NaiveBayesClassifier.train(training_set)
# read in the test tweets and check accuracy
# to add your own test tweets, add them in the respective files
test_tweets = read_tweets('unlabeled_tweetss.txt', 'unlabled')
total = accuracy = float(len(test_tweets))
for tweet in test_tweets:
if classify_tweet(tweet[0]) != tweet[1]:
accuracy -= 1
print('Total accuracy: %f%% (%d/20).' % (accuracy / total * 100, accuracy))