This is a modified version of my previous question: I am trying to run LIME on a quanteda text model built on the Trump & Clinton tweets data. I am following the example given in @Weihuang Wong's helpful answer and in Thomas Pedersen's Understanding LIME:
library(dplyr)
library(stringr)
library(quanteda)
library(lime)
library(readr) # for read_csv()
#data prep
tweet_csv <- read_csv("tweets.csv")
# creating corpus and dfm for train and test sets
get_matrix <- function(df){
  corpus <- quanteda::corpus(df)
  dfm <- quanteda::dfm(corpus, remove_url = TRUE, remove_punct = TRUE, remove = stopwords("english"))
}
set.seed(32984)
trainIndex <- sample.int(n = nrow(tweet_csv), size = floor(.8*nrow(tweet_csv)), replace = F)
train_dfm <- get_matrix(tweet_csv$text[trainIndex])
train_raw <- tweet_csv[, c("text", "tweet_num")][as.vector(trainIndex), ]
train_labels <- tweet_csv$author[as.vector(trainIndex)] == "realDonaldTrump"
test_dfm <- get_matrix(tweet_csv$text[-trainIndex])
test_raw <- tweet_csv[, c("text", "tweet_num")][-as.vector(trainIndex), ]
test_labels <- tweet_csv$author[-as.vector(trainIndex)] == "realDonaldTrump"
#### make sure that train & test sets have exactly same features
test_dfm <- dfm_select(test_dfm, train_dfm)
### Naive Bayes model using quanteda::textmodel_nb ####
nb_model <- quanteda::textmodel_nb(train_dfm, train_labels)
nb_preds <- predict(nb_model, test_dfm) #> 0.5
# select only correct predictions
predictions_tbl <- data.frame(predict_label = nb_preds$nb.predicted,
                              actual_label = test_labels,
                              tweet_name = rownames(nb_preds$posterior.prob)) %>%
  mutate(tweet_num = as.integer(str_trim(str_replace_all(tweet_name, "text", ""))))
correct_pred <- predictions_tbl %>%
  filter(actual_label == predict_label)
# pick a sample of tweets for explainer
tweets_to_explain <- test_raw %>%
  filter(tweet_num %in% correct_pred$tweet_num) %>%
  head(4)
### set up correct model class and predict functions
class(nb_model)
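# the class printed here is used as the suffix for the S3 methods defined below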
model_type.textmodel_nb_fitted <- function(x, ...) {
  return("classification")
}
# have to define a predict_model method for textmodel_nb_fitted so that newdata
# gets converted to a dfm with the same features as the training data
predict_model.textmodel_nb_fitted <- function(x, newdata, type, ...) {
  X <- corpus(newdata)
  X <- dfm_select(dfm(X), x$data$x)
  res <- predict(x, newdata = X, ...)
  switch(
    type,
    raw = data.frame(Response = res$nb.predicted, stringsAsFactors = FALSE),
    prob = as.data.frame(res$posterior.prob, check.names = FALSE)
  )
}
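As a quick sanity check (my own addition, not part of the original example), calling the new method directly on the raw text vector seems to work:
# sanity check (mine): the method should accept a plain character vector
# and return a data frame of class probabilities
predict_model(nb_model, newdata = tweets_to_explain$text, type = "prob")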
### run the explainer - no problems here
# (lime returns an error on different features in explainer and explanations, even if
# I use the same dataset in both - raised an issue on Github and asked a question on SO)
explainer <- lime(tweets_to_explain$text,
                  model = nb_model,
                  preprocess = get_matrix)
But when I run the explanation step...
corr_explanation <- lime::explain(tweets_to_explain$text,
                                  explainer,
                                  n_labels = 1,
                                  n_features = 6,
                                  cols = 2,
                                  verbose = 0)
...I get the following error:
Error in UseMethod("corpus") : no applicable method for 'corpus' applied to an object of class "c('dfm', 'dgCMatrix', 'CsparseMatrix', 'dsparseMatrix', 'generalMatrix', 'dCsparseMatrix', 'dMatrix', 'sparseMatrix', 'compMatrix', 'Matrix', 'xMatrix', 'mMatrix', 'Mnumeric', 'replValueSp')"
It traces back to corpus() being applied to newdata:
5. corpus(newdata)
4. predict_model.textmodel_nb_fitted(x = explainer$model, newdata = permutations_tokenized, type = o_type)
3. predict_model(x = explainer$model, newdata = permutations_tokenized, type = o_type)
2. explain.character(tweets_to_explain$text, explainer, n_labels = 1, n_features = 6, cols = 2, verbose = 0)
1. lime::explain(tweets_to_explain$text, explainer, n_labels = 1, n_features = 6, cols = 2, verbose = 0)
But I don't understand why this should cause any problems, since the new data is a vector of texts?
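My only guess (unverified) is that lime::explain() applies the preprocess function (here get_matrix) to the permuted texts before they reach predict_model(), so that newdata is already a dfm rather than raw text by the time corpus() is called; a quick check along these lines should show the same class as in the error message:
# my own debugging guess, not from the original example: if the preprocess
# function has already been applied, newdata carries the dfm class that
# corpus() has no method for
class(get_matrix(tweets_to_explain$text))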
Thanks for any tips!