Output problem, Shiny app R

Date: 2015-04-24 15:01:35

Tags: r shiny

I am trying to build a Shiny app that lets you search Twitter for a term, then analyze the sentiment of the tweets obtained and compute the percentage of positive and negative tweets. I have all the functions implemented, but I cannot write the results to the screen. The function that calculates the percentages is void, in other words it returns nothing. What can I do to display the results on screen?

In ui.R:

sidebarPanel(textInput("term", "Put the term", ""),
             textInput("number", "Number of tweets", ""),
             submitButton("Search")),
mainPanel(("Results of the search"), textOutput("result"))

In server.R:

myterm<-reactive({myterm<-TweetFrame(input$term, input$number)})

cleanterms <- reactive({CleanTweets(myterm()["text"])})

sentimentsTweets<-reactive({sentimentsTweets<-sentimentalanalysis(cleanterms()["text"])})

output$result <- renderPrint({paste(print(sentimentsTweets()["score"]))})
output$result <- renderPrint({CalculatePercentaje(as.vector(sentimentsTweets()))})

I would like to know how to display the scores obtained for the tweets and how to display the result of calling the CalculatePercentage function. This is the function:

CalculatePercentage<-function(sentimentTweets){

      neutral <- 0
      negativo <- 0
      positivo <- 0

      for(i in 1:length(sentimentTweets$score)){

        if(sentimentTweets$score[i] == 0){
          neutral <- neutral + 1 
        } else {
          if(sentimentTweets$score[i] > 0){
            positivo <- positivo + 1
          } else {
            negativo <- negativo + 1
          }
        }

      }

      cat("El porcentaje de tweets neutrales es ", (neutral * 100)/ length(sentimentTweets$score), "% \n")
      cat("El porcentaje de tweets positivos es ", (positivo * 100)/ length(sentimentTweets$score), "% \n")
      cat("El porcentaje de tweets negativos es ", (negativo * 100)/ length(sentimentTweets$score), "% \n")
}
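A note on why nothing shows up: output$result is assigned twice in server.R, so only the second renderPrint() takes effect, and that call refers to CalculatePercentaje while the function is defined as CalculatePercentage. Because the function prints its percentages with cat() and returns nothing, renderPrint() is the right wrapper, since it captures printed output. A minimal sketch of one possible wiring (the output IDs "score" and "percentages" are made-up names, not taken from the original code):

In ui.R:

mainPanel(h4("Results of the search"),
          verbatimTextOutput("score"),
          verbatimTextOutput("percentages"))

In server.R:

output$score       <- renderPrint({ sentimentsTweets()["score"] })
output$percentages <- renderPrint({ CalculatePercentage(sentimentsTweets()) })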

2 Answers:

Answer 0 (score: 0)

This should get you started, but I don't have enough to finish it and make it work.

ui.R:

  shinyUI(fluidPage(
    sidebarLayout(
      sidebarPanel(textInput("term", "Put the term", ""),
                   textInput("number", "Number of tweets", ""),
                   submitButton("Search")),
      mainPanel(h4("Results of the search"),
                textOutput("result1"),
                textOutput("result2"))
    )
  ))

server.R:

library(shiny)
library(plyr)      # for laply()
library(stringr)   # for str_split()

shinyServer(function(input, output) {

  score.sentiment = function(sentences, pos.words, neg.words) {

    # we got a vector of sentences. plyr will handle a list
    # or a vector as an "l" for us
    # we want a simple array ("a") of scores back, so we use 
    # "l" + "a" + "ply" = "laply":
    scores = laply(sentences, function(sentence, pos.words, neg.words) {

      # clean up sentences with R's regex-driven global substitute, gsub():
      sentence = gsub('[[:punct:]]', '', sentence)
      sentence = gsub('[[:cntrl:]]', '', sentence)
      sentence = gsub('\\d+', '', sentence)
      # and convert to lower case:
      sentence = tolower(sentence)

      # split into words. str_split is in the stringr package
      word.list = str_split(sentence, '\\s+')
      # sometimes a list() is one level of hierarchy too much
      words = unlist(word.list)

      # compare our words to the dictionaries of positive & negative terms
      pos.matches = match(words, pos.words)
      neg.matches = match(words, neg.words)

      # match() returns the position of the matched term or NA
      # we just want a TRUE/FALSE:
      pos.matches = !is.na(pos.matches)
      neg.matches = !is.na(neg.matches)

      # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
      score = sum(pos.matches) - sum(neg.matches)

      return(score)
    }, pos.words, neg.words)

    scores.df = data.frame(score=scores, text=sentences)
    return(scores.df)
  }
  sentimentalanalysis<-function(entity1text){

    # A compiled list of words expressing positive and negative sentiments ----
    #http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
    # List of words and additional information on the original source from Jeffrey Breen's github site at:
    #https://github.com/jeffreybreen/twitter-sentiment-analysis-tutorial-201107/tree/master/data/opinion-lexicon-English

    positivewords=readLines("positive_words.txt")
    negativewords=readLines("negative_words.txt")

    #Applying score.sentiment algorithm to cleaned tweets and getting data frames of tweets, net sentiment score for a tweet 
    #(number of positive sentiments minus negative sentiments)

    #entity1score = score.sentiment(CleanTweets(entity1text),positivewords,negativewords)
    entity1score = score.sentiment(entity1text,positivewords,negativewords)

    return(entity1score)

  }  
  TweetFrame <- function(term,number)
  {
     s <- sprintf("%s%s",term,number)
     return(s)
  }
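
  # Note: the TweetFrame above is just a placeholder that pastes the inputs
  # together. A hypothetical version using the twitteR package (an assumption,
  # not part of this answer) could fetch real tweets instead, e.g.:
  #   TweetFrame <- function(term, number) {
  #     tweets <- searchTwitter(term, n = as.numeric(number))
  #     twListToDF(tweets)   # data frame with a "text" column
  #   }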

myterm <- reactive({ TweetFrame(input$term, input$number) })

#cleanterms <- reactive({ CleanTweets(myterm()["text"]) })
cleanterms <- reactive({ myterm()["text"] })

sentimentsTweets <- reactive({ sentimentalanalysis(cleanterms()["text"]) })

output$result1 <- renderPrint({ sentimentsTweets()["score"] })

CalculatePercentage<-function(sentimentTweets){

  neutral <- 0
  negativo <- 0
  positivo <- 0

  for(i in 1:length(sentimentTweets$score)){

    if(sentimentTweets$score[i] == 0){
      neutral <- neutral + 1 
    } else {
      if(sentimentTweets$score[i] > 0){
        positivo <- positivo + 1
      } else {
        negativo <- negativo + 1
      }
    }

  }

  cat("El porcentaje de tweets neutrales es ", (neutral * 100)/ length(sentimentTweets$score), "% \n")
  cat("El porcentaje de tweets positivos es ", (positivo * 100)/ length(sentimentTweets$score), "% \n")
  cat("El porcentaje de tweets negativos es ", (negativo * 100)/ length(sentimentTweets$score), "% \n")
}

output$result2 <- renderPrint({CalculatePercentage(as.vector(sentimentsTweets()))})
})
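
To try the sketch locally, both files go in one app directory together with positive_words.txt and negative_words.txt; the folder name "twitterApp" below is only an example:

library(shiny)
runApp("twitterApp")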


Answer 1 (score: 0)

score.sentiment = function(sentences, pos.words, neg.words) {

  # we got a vector of sentences. plyr will handle a list
  # or a vector as an "l" for us
  # we want a simple array ("a") of scores back, so we use 
  # "l" + "a" + "ply" = "laply":
  scores = laply(sentences, function(sentence, pos.words, neg.words) {

    # clean up sentences with R's regex-driven global substitute, gsub():
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    # and convert to lower case:
    sentence = tolower(sentence)

    # split into words. str_split is in the stringr package
    word.list = str_split(sentence, '\\s+')
    # sometimes a list() is one level of hierarchy too much
    words = unlist(word.list)

    # compare our words to the dictionaries of positive & negative terms
    pos.matches = match(words, pos.words)
    neg.matches = match(words, neg.words)

    # match() returns the position of the matched term or NA
    # we just want a TRUE/FALSE:
    pos.matches = !is.na(pos.matches)
    neg.matches = !is.na(neg.matches)

    # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
    score = sum(pos.matches) - sum(neg.matches)

    return(score)
  }, pos.words, neg.words)

  scores.df = data.frame(score=scores, text=sentences)
  return(scores.df)
}






sentimentalanalysis<-function(entity1text){

  # A compiled list of words expressing positive and negative sentiments ----
  #http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
  # List of words and additional information on the original source from Jeffrey Breen's github site at:
  #https://github.com/jeffreybreen/twitter-sentiment-analysis-tutorial-201107/tree/master/data/opinion-lexicon-English

  positivewords=readLines("positive_words.txt")
  negativewords=readLines("negative_words.txt")

  #Applying score.sentiment algorithm to cleaned tweets and getting data frames of tweets, net sentiment score for a tweet 
  #(number of positive sentiments minus negative sentiments)

  entity1score = score.sentiment(CleanTweets(entity1text),positivewords,negativewords)

  return(entity1score)

}
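
As a quick, self-contained illustration (not part of the original answers), score.sentiment can be exercised with a tiny made-up lexicon; the word lists and sentences below are only an example:

library(plyr)
library(stringr)

pos.words <- c("good", "great", "love")
neg.words <- c("bad", "awful", "hate")

sample.tweets <- c("I love this, it is great!", "This is bad and awful...")
score.sentiment(sample.tweets, pos.words, neg.words)
# expected: a data frame whose score column is 2 (two positive words)
# and -2 (two negative words), with the original sentences in the text column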