Reading objects from shinyoutput object not allowed: problem linking a function with a Shiny app

Time: 2017-11-14 18:58:54

Tags: r twitter shiny

I am running into a problem displaying the result of a function in an interactive Shiny app. I wrote a function, Scrapping, that collects tweets from the Twitter API; it works when I run it on its own, and it only fails inside the app. When I take the text output from my app and pass it as an argument to my function inside the server, I get an error. It looks like it comes from the toString function. Can you help?

# Define UI for the app
ui <- fluidPage(

  titlePanel('Twitter Scraping'),

  sidebarLayout(
    sidebarPanel("Work In Progress",
      h2("Input a research, get trends linked with it from Twitter"),
      actionButton("ActionButton", label = "Get Result"),
      dateRangeInput("Dates", label = "Dates de recherche",
                     start = '2017-08-01', end = Sys.Date()),
      textInput("Research", label = "Recherche", value = 'fintech'),
      selectInput("Localisation", label = "Localisation",
                  choices = c('Paris', 'Londres', 'Lyon', 'New York'))
    ),

    mainPanel(
      textOutput("Localisation"),
      textOutput("Dates"),
      textOutput("Recherche1"),
      plotOutput("Plot")
    )
    # img(src = "my_image.png", height = 72, width = 72)
  )
)


# Server logic
server <- function(input, output) {

  # Call the function
  output$Recherche2 <- renderText(input$Research)
  MotCle <- toString(output$Recherche2)
  d <- Scrapping(MotCle)
  Top10 <- head(d, 10)

  # Retrieve the plot and the descriptive text
  output$Localisation <- renderText({ paste("Vous avez sélectionné la localisation autour de", input$Localisation) })
  output$Dates <- renderText({ paste("Votre recherche porte du", input$Dates[1], "au", input$Dates[2]) })
  output$Recherche1 <- renderText({ paste("La recherche porte sur les tweets contenant", input$Research) })

  output$Plot <- renderPlot({
    output$Recherche2 <- renderText(input$Research)
    MotCle <- toString(output$Recherche2)
    d <- Scrapping(MotCle)
    Top10 <- head(d, 10)

    wordcloud(words = d$word, freq = d$freq, min.freq = 10,
              max.words = 100, random.order = FALSE, rot.per = 0.35,
              colors = brewer.pal(8, "Dark2"))
  })
}
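For reference, a minimal sketch (an editor's rewrite, not code from the question) of a server that avoids the error in the title: output slots are write-only in Shiny, so toString(output$Recherche2) fails with "Reading objects from shinyoutput object not allowed"; the search term should instead be read from input$Research inside a reactive context. The sketch assumes the Scrapping function below is available and ties the scrape to the ActionButton defined in the UI via eventReactive:

# Sketch of a server that reads from `input` instead of `output`
server <- function(input, output) {

  # Re-run the (slow) scrape only when the button is pressed;
  # d() then returns the word/freq data frame from Scrapping
  d <- eventReactive(input$ActionButton, {
    Scrapping(toString(input$Research))
  })

  output$Localisation <- renderText({ paste("Vous avez sélectionné la localisation autour de", input$Localisation) })
  output$Dates <- renderText({ paste("Votre recherche porte du", input$Dates[1], "au", input$Dates[2]) })
  output$Recherche1 <- renderText({ paste("La recherche porte sur les tweets contenant", input$Research) })

  output$Plot <- renderPlot({
    wordcloud(words = d()$word, freq = d()$freq, min.freq = 10,
              max.words = 100, random.order = FALSE, rot.per = 0.35,
              colors = brewer.pal(8, "Dark2"))
  })
}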

And the Scrapping function:

# Required packages (assumed to be loaded at the top of the script):
# library(twitteR); library(tm); library(stringr)
# library(wordcloud); library(RColorBrewer)
Scrapping <- function(Recherche) {

  # Desired number of tweets
  NombreTweets <- 1000

  # Desired date range
  Date_début <- '2017-04-01'
  Date_fin <- '2017-11-13'

  # Connect to the Twitter API
  Info_tweets <- searchTwitteR(Recherche, n = NombreTweets,
                               since = Date_début, until = Date_fin,
                               geocode = '48.878114,2.629798,100mi')

  # Extract the text // build the vector holding the tweet text
  Tweets <- matrix(NA, nrow = NombreTweets)
  for (i in 1:NombreTweets) {
    Tweets[i] <- Info_tweets[[i]]$text
  }
  #View(Tweets)

  #Textes = lapply(Info_tweets, function(t) t$getText())

  # Analyse the tweets

  # Build a corpus
  data.corpus <- Corpus(VectorSource(Tweets))
  data.corpus <- str_replace_all(data.corpus, "[[:punct:]]", " ")
  # Remove all non-alphanumeric characters - just as a backup to the special chars
  data.corpus <- str_replace_all(data.corpus, "[[:punct:]]", " ")
  # Split camelCase
  data.corpus <- gsub("([a-z])([A-Z])", "\\1 \\L\\2", data.corpus, perl = TRUE)
  # Lowercase
  data.corpus <- tolower(data.corpus)
  # Remove everything that is not a number or letter (may want to keep more
  # stuff in your actual analyses)
  data.corpus <- str_replace_all(data.corpus, "[^a-zA-Z0-9\\s]", " ")
  # Shrink runs of whitespace down to a single space
  data.corpus <- str_replace_all(data.corpus, "[\\s]+", " ")

  data.corpus <- Corpus(VectorSource(data.corpus))

  # Remove numbers and stem; the punctuation and whitespace steps are commented out
  data.corpus <- tm_map(data.corpus, removeNumbers)
  #data.corpus <- tm_map(data.corpus, removePunctuation)
  #data.corpus <- tm_map(data.corpus, stripWhitespace)
  data.corpus <- tm_map(data.corpus, stemDocument)

  # Convert to lowercase
  data.corpus <- tm_map(data.corpus, content_transformer(tolower))

  # Remove specific words
  #data.corpus <- tm_map(data.corpus, removeWords, c(Recherche, "€", 'https'))

  # Remove stop words, otherwise some of them would appear among the most used words
  some_stopwordsEN <- c(stopwords('english'))
  some_stopwordsFR <- c(stopwords('french'))
  some_stopwordsESP <- c(stopwords('spanish'))
  data.corpus <- tm_map(data.corpus, removeWords, some_stopwordsEN)
  data.corpus <- tm_map(data.corpus, removeWords, some_stopwordsFR)
  data.corpus <- tm_map(data.corpus, removeWords, some_stopwordsESP)

  # Word cloud: term frequencies from a term-document matrix
  dtm <- TermDocumentMatrix(data.corpus)
  m <- as.matrix(dtm)
  v <- sort(rowSums(m), decreasing = TRUE)
  d <- data.frame(word = names(v), freq = v)
  Top10 <- head(d, 10)

  set.seed(1234)
  wordcloud(words = d$word, freq = d$freq, min.freq = 10,
            max.words = 200, random.order = FALSE, rot.per = 0.35,
            colors = brewer.pal(8, "Dark2"))

  return(d)
}
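For completeness, a sketch of running Scrapping on its own, the case the question says already works. searchTwitteR() requires an authenticated session, so setup_twitter_oauth() must be called first; the credential strings below are placeholders, not real values:

# Standalone usage sketch (placeholder credentials)
library(twitteR)
library(tm)
library(stringr)
library(wordcloud)
library(RColorBrewer)

# Authenticate against the Twitter API before searching
setup_twitter_oauth(consumer_key = "xxxx", consumer_secret = "xxxx",
                    access_token = "xxxx", access_secret = "xxxx")

freqs <- Scrapping('fintech')   # returns the word/freq data frame
head(freqs, 10)                 # ten most frequent terms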

0 answers