I would like to know how to train MALLET LDA on the sentences of the 130 .txt files (monthly data) in my corpus. Because I currently estimate the model at the document level, the problem I face is that the plot of topic proportions over time looks very strange: for some topics the proportions stay essentially flat and never change over time.
This is the script I am using:
dir <- "C:/Users/Dell/desktop/MPSCLEANED"
setwd(dir)
require(mallet)
documents <- mallet.read.dir(dir)
mallet.instances <- mallet.import(documents$id, documents$text,
                                  "C:/Users/Dell/desktop/stopwords.txt",
                                  token.regexp = "\\p{L}[\\p{L}\\p{P}]+\\p{L}")
# Before moving on, I just wonder how I can estimate LDA on sentences from
# all documents in my corpus (see the sketch after this script).
n.topics <- 15
topic.model <- MalletLDA(n.topics, alpha.sum=3.33, beta=0.2)
topic.model$model$setRandomSeed(19820L)
topic.model$setOptimizeInterval(50L)
topic.model$loadDocuments(mallet.instances)
vocabulary <- topic.model$getVocabulary()
word.freqs <- mallet.word.freqs(topic.model)
topic.model$setAlphaOptimization(1,1000)
topic.model$train(1000)
topic.model$maximize(20)
doc.topics <- mallet.doc.topics(topic.model, smoothed=T, normalized=T)
topic.words <- mallet.topic.words(topic.model, smoothed=T, normalized=T)
topic.docs <- t(doc.topics)
topic.docs <- topic.docs / rowSums(topic.docs)
topics.labels <- rep("", n.topics)
for (topic in 1:n.topics) topics.labels[topic] <-
  paste(mallet.top.words(topic.model, topic.words[topic,], num.top.words=5)$words,
        collapse=" ")
topics.labels
#Topics over time
options(java.parameters="-Xmx2g")
library("dfrtopics")
library("dplyr")
library("ggplot2")
library("lubridate")
library("stringr")
library("mallet")
m <- mallet_model(doc_topics = doc.topics, doc_ids = documents$id,
                  vocab = vocabulary, topic_words = topic.words,
                  model = topic.model)
pd <- data.frame(date = list.files(path = "C:/Users/Dell/Desktop/MPS"))
pd$date <- gsub("\\.txt$", "", pd$date)
meta <- data.frame(id = documents$id, pubdate = as.Date(pd$date, "%Y%m%d"))
metadata(m) <- meta
# Visualize topics over time
theme_update(strip.text=element_text(size=7),
axis.text=element_text(size=7))
topic_series(m) %>%
plot_series(labels=topic_labels(m, 2))
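
To address the question in the comment above, here is a minimal sketch of one way to split each document into sentences before importing, so that each MALLET instance is one sentence rather than one file. It assumes a simple split on end-of-sentence punctuation; the sentences data frame and its sent_id / doc columns are hypothetical names, not part of the mallet package.

# Sketch: split each document into sentences and import one MALLET instance
# per sentence, keeping the source file id so sentence-level topic shares
# can later be aggregated back to months.
sentences <- do.call(rbind, lapply(seq_len(nrow(documents)), function(i) {
  # split on whitespace that follows ., ! or ? (crude sentence boundary rule)
  sents <- unlist(strsplit(documents$text[i], "(?<=[.!?])\\s+", perl = TRUE))
  sents <- sents[nchar(trimws(sents)) > 0]
  data.frame(sent_id = paste(documents$id[i], seq_along(sents), sep = "_"),
             doc     = documents$id[i],
             text    = sents,
             stringsAsFactors = FALSE)
}))
sentence.instances <- mallet.import(sentences$sent_id, sentences$text,
                                    "C:/Users/Dell/desktop/stopwords.txt",
                                    token.regexp = "\\p{L}[\\p{L}\\p{P}]+\\p{L}")

The rest of the script would then load sentence.instances instead of mallet.instances, and each row of doc.topics would correspond to one sentence instead of one file.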
Answer 0 (score: 1):
130 documents is not many for estimating a topic model. Can the files be subdivided into smaller pieces?
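
If the files are subdivided (for example into the sentence-level instances sketched above), one way to recover a topics-over-time series is to average the per-sentence topic shares by month. This is only a sketch: it assumes the model has been re-created and trained on sentence.instances, and that each file name encodes the date as YYYYMMDD (e.g. "20150131.txt"), which is an assumption about this corpus.

# Sketch: average sentence-level topic shares back to one row per month,
# assuming topic.model was trained on sentence.instances and file names
# follow the YYYYMMDD.txt pattern.
sent.topics <- mallet.doc.topics(topic.model, smoothed = TRUE, normalized = TRUE)
month <- format(as.Date(gsub("\\.txt$", "", basename(sentences$doc)), "%Y%m%d"),
                "%Y-%m")
monthly <- aggregate(sent.topics, by = list(month = month), FUN = mean)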