I trained a model on a set of Wikipedia articles split into two categories (12 articles per category).
Here is how I created the model, trained it, and pickled it:
import numpy as np
import re
import nltk
from sklearn.datasets import load_files
import pickle
from nltk.corpus import stopwords
data = load_files(r'[...]review_polarity')
X, y = data.data, data.target
documents = []
from nltk.stem import WordNetLemmatizer
stemmer = WordNetLemmatizer()
for sen in range(0, len(X)):
    # Remove all the special characters
    document = re.sub(r'\W', ' ', str(X[sen]))
    # Remove all single characters
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
    # Remove single characters from the start
    document = re.sub(r'^[a-zA-Z]\s+', ' ', document)
    # Substitute multiple spaces with a single space
    document = re.sub(r'\s+', ' ', document, flags=re.I)
    # Remove the prefixed 'b' left over from str() on a bytes object
    document = re.sub(r'^b\s+', '', document)
    # Convert to lowercase
    document = document.lower()
    # Lemmatization
    document = document.split()
    document = [stemmer.lemmatize(word) for word in document]
    document = ' '.join(document)
    documents.append(document)
from sklearn.feature_extraction.text import TfidfVectorizer
tfidfconverter = TfidfVectorizer(max_features=1500, min_df=0, max_df=1.0, stop_words=stopwords.words('english'))
X = tfidfconverter.fit_transform(documents).toarray()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=1000, random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
with open('text_classifier', 'wb') as picklefile:
    pickle.dump(classifier, picklefile)
Then I loaded the pickled model and tried to predict the category of a new, unseen article:
import pickle
import sys, os
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
with open(os.path.join(sys.path[0], 'text_classifier'), 'rb') as training_model:
    model = pickle.load(training_model)
with open(os.path.join(sys.path[0], 'article.txt'), 'rb') as f:
    X = [f.read()]
documents = []
stemmer = WordNetLemmatizer()
for sen in range(0, len(X)):
    # Remove all the special characters
    document = re.sub(r'\W', ' ', str(X[sen]))
    # Remove all single characters
    document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
    # Remove single characters from the start
    document = re.sub(r'^[a-zA-Z]\s+', ' ', document)
    # Substitute multiple spaces with a single space
    document = re.sub(r'\s+', ' ', document, flags=re.I)
    # Remove the prefixed 'b' left over from str() on a bytes object
    document = re.sub(r'^b\s+', '', document)
    # Convert to lowercase
    document = document.lower()
    # Lemmatization
    document = document.split()
    document = [stemmer.lemmatize(word) for word in document]
    document = ' '.join(document)
    documents.append(document)
tfidfconverter = TfidfVectorizer(max_features=1500, min_df=0, max_df=1.0, stop_words=stopwords.words('english'))
X = tfidfconverter.fit_transform(documents).toarray()
y_pred = model.predict(X)
print(y_pred)
Calling the predict function raises the following error:

Number of features of the model must match the input. Model n_features is 10 and input n_features is 47

It seems the new article was turned into a numpy array with 47 features, while the trained model works with arrays of 10 features. I'm not sure I'm understanding this correctly, so any help in understanding it better and getting it to work would be much appreciated.
Thanks!
Answer 0: (score: 0)
The answer is that for new, unseen data I should use the transform method instead of fit_transform, so that the number of features stays the same. fit_transform learns a fresh vocabulary from whatever text it is given, while transform reuses the vocabulary learned on the training data; this also means the vectorizer fitted during training has to be saved alongside the classifier and loaded again at prediction time.
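Below is a minimal sketch of that corrected workflow. It is not the exact pipeline from the question: the toy documents, labels, default TfidfVectorizer settings, and the file name 'vectorizer.pkl' are illustrative placeholders, and the text-cleaning loop is omitted for brevity.

import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer

# --- Training time: fit the vectorizer once on the training documents ---
train_docs = ["first cleaned training article", "second cleaned training article"]
train_labels = [0, 1]

vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(train_docs).toarray()  # learns the vocabulary

classifier = RandomForestClassifier(n_estimators=1000, random_state=0)
classifier.fit(X_train, train_labels)

# Pickle BOTH objects: the classifier alone is not enough, because the
# learned vocabulary lives inside the fitted vectorizer.
with open('text_classifier', 'wb') as f:
    pickle.dump(classifier, f)
with open('vectorizer.pkl', 'wb') as f:
    pickle.dump(vectorizer, f)

# --- Prediction time: load the SAME fitted vectorizer and only transform() ---
with open('text_classifier', 'rb') as f:
    model = pickle.load(f)
with open('vectorizer.pkl', 'rb') as f:
    vectorizer = pickle.load(f)

new_docs = ["a new unseen article"]
X_new = vectorizer.transform(new_docs).toarray()  # same feature count as training
print(model.predict(X_new))

With transform, words that never appeared in the training data are simply ignored rather than given new columns, so the feature count always matches what the RandomForest was trained on.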