I'm a beginner in machine learning, and I'm implementing my first Naive Bayes classifier to understand it better. I'm using the dataset from http://archive.ics.uci.edu/ml/datasets/Adult (US census data; the classes are '<=50K' and '>50K').
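Each row holds 14 comma-separated attributes followed by the class label; for example, a row of adult.data looks like this:

39, State-gov, 77516, Bachelors, 13, Never-married, Adm-clerical, Not-in-family, White, Male, 2174, 0, 40, United-States, <=50K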
Here is my Python code:
#!/usr/bin/python
import sys
import csv
words_stats = {} # {'word': {'class1': cnt, 'class2': cnt}}
words_cnt = 0
targets_stats = {} # {'class1': 3234, 'class2': 884} how many words in each class
class_stats = {} # {'class1': 7896, 'class2': 3034} how many lines in each class
items_cnt = 0
def train(dataset, targets):
    global words_stats, words_cnt, targets_stats, items_cnt, class_stats
    num = len(dataset)
    for item in xrange(num):
        class_stats[targets[item]] = class_stats.get(targets[item], 0) + 1
        for i in xrange(len(dataset[item])):
            word = dataset[item][i]
            if word not in words_stats:
                words_stats[word] = {}
            tgt = targets[item]
            cnt = words_stats[word].get(tgt, 0)
            words_stats[word][tgt] = cnt + 1
            targets_stats[tgt] = targets_stats.get(tgt, 0) + 1
            words_cnt += 1
    items_cnt = num
def classify(doc, tgt_set):
    global words_stats, words_cnt, targets_stats, items_cnt
    probs = {} # the probability itself, P(c|W) = P(W|c) * P(c) / P(W)
    pc = {}    # probability of the class in the document set, P(c)
    pwc = {}   # probability of the word set in a particular class, P(W|c)
    pw = 1     # probability of the word set in the document set, P(W)
    for word in doc:
        if word not in words_stats:
            continue # dirty, very dirty
        pw = pw * float(sum(words_stats[word].values())) / words_cnt
    for tgt in tgt_set:
        pc[tgt] = class_stats[tgt] / float(items_cnt)
        for word in doc:
            if word not in words_stats:
                continue # dirty, very dirty
            tgt_wrd_cnt = words_stats[word].get(tgt, 0)
            pwc[tgt] = pwc.get(tgt, 1) * float(tgt_wrd_cnt) / targets_stats[tgt]
        probs[tgt] = (pwc[tgt] * pc[tgt]) / pw
    l = sorted(probs.items(), key=lambda i: i[1], reverse=True)
    print probs
    return l[0][0]
def check_results(dataset, targets):
    num = len(dataset)
    tgt_set = set(targets)
    correct = 0
    incorrect = 0
    for item in xrange(num):
        res = classify(dataset[item], tgt_set)
        if res == targets[item]:
            correct = correct + 1
        else:
            incorrect = incorrect + 1
    print 'correct:', float(correct) / num, ' incorrect:', float(incorrect) / num
def load_data(fil):
    data = []
    tgts = []
    reader = csv.reader(fil)
    for line in reader:
        d = [x.strip() for x in line]
        if '?' in d: # drop rows with missing values
            continue
        if not len(d):
            continue
        data.append(d[:-1])
        tgts.append(d[-1])
    return data, tgts
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print './program train_data.txt test_data.txt'
        sys.exit(1)

    filename = sys.argv[1]
    fil = open(filename, 'r')
    data, tgt = load_data(fil)
    train(data, tgt)

    test_file = open(sys.argv[2], 'r')
    test_data, test_tgt = load_data(test_file)
    check_results(test_data, test_tgt)
It gets about 61% of the answers right. When I print the probabilities, I get the following:
{'<=50K': 0.07371606889800396, '>50K': 15.325378327213354}
But for a correct classifier I would expect the two probabilities to sum to 1. At first I thought the problem was floating-point underflow, so I tried doing all the calculations in logarithms, but the results were similar. I understand that skipping some words hurts accuracy, but these probabilities are simply wrong.
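The log-space version I tried looks roughly like this (just a sketch; the shared P(W) term is dropped, since it is identical for every class and cannot change the ranking):

import math

def classify_log(doc, tgt_set):
    # Same scoring as classify(), but summing logs instead of multiplying
    # raw probabilities, to avoid floating-point underflow.
    scores = {}
    for tgt in tgt_set:
        score = math.log(class_stats[tgt] / float(items_cnt)) # log P(c)
        for word in doc:
            if word not in words_stats:
                continue # dirty, very dirty
            tgt_wrd_cnt = words_stats[word].get(tgt, 0)
            if tgt_wrd_cnt == 0:
                score = float('-inf') # a zero count zeroes the whole product
                break
            score += math.log(float(tgt_wrd_cnt) / targets_stats[tgt]) # log P(w|c)
        scores[tgt] = score
    return max(scores, key=scores.get)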
What am I doing wrong, or failing to understand?
For your convenience, I've uploaded the dataset and the Python script here: https://dl.dropboxusercontent.com/u/36180992/adult.tar.gz
Thank you for your help.
Answer 0 (score: 1)
Naive Bayes doesn't compute probabilities directly. Rather, it computes a "raw score" for each label, relative to the other labels' scores, in order to classify an instance. That score can easily be converted into a value in [0, 1]:
total = sum(probs.itervalues())
for label, score in probs.iteritems():
    probs[label] = score / total
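For example, plugging in the scores from the question:

probs = {'<=50K': 0.07371606889800396, '>50K': 15.325378327213354}
total = sum(probs.itervalues())
for label, score in probs.iteritems():
    probs[label] = score / total
print probs # roughly {'<=50K': 0.00479, '>50K': 0.99521}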
However, keep in mind that these values still do not represent true probabilities, as described in this answer:
"Naive Bayes tends to predict probabilities that are almost always either very close to zero or very close to one."
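A toy calculation shows why: the score is a product of one likelihood factor per feature, so even modest per-word preferences compound exponentially. If each of the 14 attributes were merely twice as likely under one class (a made-up ratio, purely for illustration), the normalized score of that class would already be:

ratio = 2.0 ** 14         # one factor of 2 per attribute
print ratio / (ratio + 1) # 0.99993..., essentially 1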