我在R中有以下代码来获取有关当地市长候选人的最新推文并创建一个wordcloud:
library(twitteR)
library(ROAuth)
require(RCurl)
library(stringr)
library(tm)
library(ggmap)
library(plyr)
library(dplyr)
library(SnowballC)
library(wordcloud)
(...)
setup_twitter_oauth(...)
N = 10000 #Number of twetts
S = 200 #200Km radius from Natal (Covers the whole Natal area)
candidate = 'Carlos+Eduardo'
#Lists so I can add more cities in future codes
lats = c(-5.7792569)
lons = c(-35.200916)
# Gets the tweets from every city
result = do.call(
rbind,
lapply(
1:length(lats),
function(i) searchTwitter(
candidate,
lang="pt-br",
n=N,
resultType="recent",
geocode=paste(lats[i], lons[i], paste0(S,"km"), sep=",")
)
)
)
# Get the latitude and longitude of each tweet,
# the tweet itself, how many times it was re-twitted and favorited,
# the date and time it was twitted, etc and builds a data frame.
result_lat = sapply(result, function(x) as.numeric(x$getLatitude()))
result_lat = sapply(result_lat, function(z) ifelse(length(z) != 0, z, NA))
result_lon = sapply(result, function(x) as.numeric(x$getLongitude()))
result_lon = sapply(result_lon, function(z) ifelse(length(z) != 0, z, NA))
result_date = lapply(result, function(x) x$getCreated())
result_date = sapply(result_date,
function(x) strftime(x, format="%d/%m/%Y %H:%M%S", tz="UTC")
)
result_text = sapply(result, function(x) x$getText())
result_text = unlist(result_text)
is_retweet = sapply(result, function(x) x$getIsRetweet())
retweeted = sapply(result, function(x) x$getRetweeted())
retweet_count = sapply(result, function(x) x$getRetweetCount())
favorite_count = sapply(result, function(x) x$getFavoriteCount())
favorited = sapply(result, function(x) x$getFavorited())
tweets = data.frame(
cbind(
tweet = result_text,
date = result_date,
lat = result_lat,
lon = result_lon,
is_retweet=is_retweet,
retweeted = retweeted,
retweet_count = retweet_count,
favorite_count = favorite_count,
favorited = favorited
)
)
# World Cloud
#Text stemming require the package ‘SnowballC’.
#https://cran.r-project.org/web/packages/SnowballC/index.html
#Create corpus
corpus = Corpus(VectorSource(tweets$tweet))
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, stopwords('portuguese'))
corpus = tm_map(corpus, stemDocument)
wordcloud(corpus, max.words = 50, random.order = FALSE)
但是我收到了这些错误:
simple_triplet_matrix中的错误(i = i,j = j,v = as.numeric(v),nrow = length(allTerms),:
' i,j,v'不同的长度
另外:警告信息:
1:在doRppAPICall中("搜索/推文",n,params = params, retryOnRateLimit = retryOnRateLimit,:
请求了10000条推文,但API只能返回518
#I理解这一点,我无法获得更多存在的推文2:在mclapply(unname(content(x)),termFreq,control):all 计划核心在用户代码中遇到错误
3:在simple_triplet_matrix中(i = i,j = j,v = as.numeric(v),nrow = length(allTerms),:由强制引入的NA
这是我第一次构建wordcloud,我遵循了这样的教程one。
有没有办法解决它?另一件事是:tweets$tweet
的类是"因素",我应该转换它还是什么?如果是的话,我是怎么做到的?
答案 0 :(得分:0)
我认为问题在于wordcloud
没有为 tm 语料库对象定义。安装 quanteda 包,并尝试:
plot(quanteda::corpus(corpus), max.words = 50, random.order = FALSE)
答案 1 :(得分:0)
我遵循了这个tutorial,它定义了一个函数来清理"在构建wordcloud之前,文本并创建TermDocumentMatrix而不是stemDocument。它现在正常工作。