R code works on my Windows machine but not on Shiny Server

Time: 2017-07-30 19:01:24

Tags: r shiny shiny-server

My R code works on my Windows machine but not on Shiny Server; please help.

ui.r

    library(shiny)

    shinyUI(
      fluidPage(
        # Application title
        titlePanel("Word Cloud"),

        sidebarLayout(
          sidebarPanel("Sidebar with a slider and selection inputs"),

          # Show the word cloud
          mainPanel(
            plotOutput("plot")
          )
        )
      )
    )

server.r

    library(shiny)

    shinyServer(function(input, output) {

      output$plot <- renderPlot({
        comparison.cloud(o_g_m, colors = c("orange", "blue"), max.words = 50)
      })
    })
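For context, comparison.cloud() from the wordcloud package expects a term matrix whose rows are words and whose columns are the groups being compared; the o_g_m object that renderPlot() uses is built that way at the end of global.r. A minimal standalone sketch with made-up counts (toy data, not the app's real inputs):

    library(wordcloud)
    # toy term matrix: rows = words, columns = the two groups to compare
    toy_m <- matrix(c(10, 2,
                       3, 8,
                       5, 5),
                    nrow = 3, byrow = TRUE,
                    dimnames = list(c("budget", "protest", "reform"),
                                    c("Government", "Opposition")))
    comparison.cloud(toy_m, colors = c("orange", "blue"), max.words = 50)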

global.r

    library(plyr)
    library(rjson)
    library(RCurl)
    library(bitops)
    library(qdapDictionaries)
    library(qdapRegex)
    library(validate)
    library(qdapTools)
    library(ggplot2)
    library(base)
    library(RDSTK)
    library(readr)
    library(twitteR)

    library(syuzhet)
    library(NLP)
    library(tm)
    library(RWeka)
    library(RColorBrewer)
    library(wordcloud)
    library(qdap)
    library(stringr)

    # upload positive-word list
    pos <- readLines("PML.txt")
    # all lowercase
    pos <- tolower(pos)

    # upload negative-word list
    neg <- readLines("OPPS.txt")
    # all lowercase
    neg <- tolower(neg)

    # upload editorials
    editorials <- readLines("news.csv")

    # upload custom stop words and combine with the built-in English list
    custom_stop_words <- readLines("customStopWords.txt")
    new_stop_words <- c(custom_stop_words, stopwords("en"))

    score.sentiment_p <- function(sentences, pos.words, .progress = 'none') {
      # Parameters
      #   sentences: vector of text to score
      #   pos.words: vector of words with positive sentiment
      #   .progress: passed to laply() to control the progress bar

      # create a simple array of scores with laply
      scores <- laply(sentences,
                      function(sentence, pos.words) {
                        # remove punctuation with a global substitute
                        sentence <- gsub("[[:punct:]]", "", sentence)
                        # remove control characters
                        sentence <- gsub("[[:cntrl:]]", "", sentence)
                        # remove digits
                        sentence <- gsub("\\d+", "", sentence)
                        # error-handling wrapper for tolower()
                        tryTolower <- function(x) {
                          # create a missing value
                          y <- NA
                          # tryCatch the error
                          try_error <- tryCatch(tolower(x), error = function(e) e)
                          # if not an error
                          if (!inherits(try_error, "error"))
                            y <- tolower(x)
                          return(y)
                        }
                        # use tryTolower with sapply
                        sentence <- sapply(sentence, tryTolower)
                        # split the sentence into n-grams with BigramTokenizer (RWeka package)
                        ngram.list <- BigramTokenizer(sentence)
                        words <- unlist(ngram.list)

                        # position of each matched positive term (or NA),
                        # reduced to TRUE/FALSE
                        pos.matches <- match(words, pos.words)
                        pos.matches <- !is.na(pos.matches)

                        # final score: number of positive matches
                        score <- sum(pos.matches)
                        return(score)
                      },
                      pos.words, .progress = .progress)

      # data frame with a score for each sentence
      scores.df <- data.frame(text = sentences, score = scores)
      return(scores.df)
    }

    score.sentiment_n <- function(sentences, neg.words, .progress = 'none') {
      # Parameters
      #   sentences: vector of text to score
      #   neg.words: vector of words with negative sentiment
      #   .progress: passed to laply() to control the progress bar

      # create a simple array of scores with laply
      scores <- laply(sentences,
                      function(sentence, neg.words) {
                        # remove punctuation with a global substitute
                        sentence <- gsub("[[:punct:]]", "", sentence)
                        # remove control characters
                        sentence <- gsub("[[:cntrl:]]", "", sentence)
                        # remove digits
                        sentence <- gsub("\\d+", "", sentence)
                        # error-handling wrapper for tolower()
                        tryTolower <- function(x) {
                          # create a missing value
                          y <- NA
                          # tryCatch the error
                          try_error <- tryCatch(tolower(x), error = function(e) e)
                          # if not an error
                          if (!inherits(try_error, "error"))
                            y <- tolower(x)
                          return(y)
                        }
                        # use tryTolower with sapply
                        sentence <- sapply(sentence, tryTolower)
                        # split the sentence into n-grams with BigramTokenizer (RWeka package)
                        ngram.list <- BigramTokenizer(sentence)
                        words <- unlist(ngram.list)

                        # position of each matched negative term (or NA),
                        # reduced to TRUE/FALSE
                        neg.matches <- match(words, neg.words)
                        neg.matches <- !is.na(neg.matches)

                        # final score: number of negative matches
                        score <- sum(neg.matches)
                        return(score)
                      },
                      neg.words, .progress = .progress)

      # data frame with a score for each sentence
      scores.df <- data.frame(text = sentences, score = scores)
      return(scores.df)
    }


    #-----------------------------------------------------------------------------
    # Function to create n-grams (2- to 4-grams)
    BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 4))
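    # Illustrative aside (not part of the original file): NGramTokenizer() is
    # RWeka's Java-backed tokenizer (it goes through rJava), so it needs a
    # working Java setup wherever the app runs. For example,
    # BigramTokenizer("budget speech in parliament") returns all 2- to 4-word
    # n-grams, e.g. "budget speech", "speech in parliament" and
    # "budget speech in parliament".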


    # score.sentiment_p is self-written (positive score)
    gov_sentiment <- score.sentiment_p(editorials, pos)
    gov_not.blank <- subset(gov_sentiment, score != 0)
    gov_articles <- gov_not.blank

    gov_senti_score <- sum(gov_articles$score)

    gov_articles <- gov_articles$text

    # score.sentiment_n is self-written (negative score)
    opp_sentiment <- score.sentiment_n(editorials, neg)
    opp_not.blank <- subset(opp_sentiment, score != 0)
    opp_articles <- opp_not.blank

    opp_senti_score <- sum(opp_articles$score)
    opp_senti_score
    opp_articles <- opp_articles$text


    clean.text <- function(x, lowercase = TRUE, numbers = TRUE,
                           punctuation = TRUE, spaces = TRUE) {
      # x: character vector of text

      # lower case
      if (lowercase)
        x <- tolower(x)
      # remove numbers
      if (numbers)
        x <- gsub("[[:digit:]]", "", x)
      # remove punctuation symbols
      if (punctuation)
        x <- gsub("[[:punct:]]", "", x)
      # remove extra white space
      if (spaces) {
        x <- gsub("[ \t]{2,}", " ", x)
        x <- gsub("^\\s+|\\s+$", "", x)
      }
      # return
      x
    }


    # Remove stop words from the text
    gov_articles <- clean.text(gov_articles)
    opp_articles <- clean.text(opp_articles)
    gov_articles <- removeWords(gov_articles, new_stop_words)
    opp_articles <- removeWords(opp_articles, new_stop_words)


    #-------------------------------------------------------------------
    # Make volatile corpora
    gov_articles_c <- VCorpus(VectorSource(gov_articles))
    opp_articles_c <- VCorpus(VectorSource(opp_articles))

    # Corpus-cleaning function
    clean_corpus <- function(corpus) {
      corpus <- tm_map(corpus, stripWhitespace)
      corpus <- tm_map(corpus, removePunctuation)
      corpus <- tm_map(corpus, content_transformer(tolower))
      corpus <- tm_map(corpus, removeWords, stopwords("en"))
      return(corpus)
    }

    # Apply the customized cleaning function: clean_corp
    clean_corp_g <- clean_corpus(gov_articles_c)
    clean_corp_o <- clean_corpus(opp_articles_c)

    # Make the tokenizer function
    tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 4))

    # Create the n-gram document-term matrices
    gov_articles_bigram_dtm <- DocumentTermMatrix(
      clean_corp_g,
      control = list(tokenize = tokenizer)
    )

    opp_articles_bigram_dtm <- DocumentTermMatrix(
      clean_corp_o,
      control = list(tokenize = tokenizer)
    )

    # Convert the DTMs to plain matrices
    gov_articles_bigram_dtm_m <- as.matrix(gov_articles_bigram_dtm)
    opp_articles_bigram_dtm_m <- as.matrix(opp_articles_bigram_dtm)

    # Term frequencies
    gov_articles_freq <- colSums(gov_articles_bigram_dtm_m)
    opp_articles_freq <- colSums(opp_articles_bigram_dtm_m)

    #--------------------------------------------------------------------
    # Sort term frequencies in descending order
    gov_articles_term_freq <- sort(gov_articles_freq, decreasing = TRUE)
    opp_articles_term_freq <- sort(opp_articles_freq, decreasing = TRUE)

    # View the 10 most common terms
    gov_articles_term_freq[1:10]
    opp_articles_term_freq[1:10]

    # Bar charts of the 10 most common terms
    barplot(gov_articles_term_freq[1:10], col = "red", las = 2)
    barplot(opp_articles_term_freq[1:10], col = "red", las = 2)

    #---------------------------------------------------------------------
    # Sentiments
    govt <- as.vector(gov_articles)
    opps <- as.vector(opp_articles)
    g_scores <- get_nrc_sentiment(govt)
    o_scores <- get_nrc_sentiment(opps)
    g_scores
    o_scores

    # columns 9:10 are negative/positive polarity, columns 1:8 the eight emotions
    g_polarity <- g_scores[c(9:10)]
    o_polarity <- o_scores[c(9:10)]
    g_sentiment <- g_scores[c(1:8)]
    o_sentiment <- o_scores[c(1:8)]
    g_polarity
    o_polarity
    g_sentiment
    o_sentiment

    sum_g_polarity <- colSums(g_polarity)
    sum_o_polarity <- colSums(o_polarity)
    sum_g_polarity
    sum_o_polarity
    barplot(sum_g_polarity)
    barplot(sum_o_polarity)

    g_polarity_m <- as.matrix(g_polarity, rownames.force = TRUE)
    o_polarity_m <- as.matrix(o_polarity, rownames.force = TRUE)
    barplot(g_polarity_m)
    barplot(o_polarity_m)

    #-------------------------------------------------------------------
    # Visualize common words

    # Collapse all government articles into a single document
    all_gov_articles <- paste(gov_articles, collapse = " ")

    # Collapse all opposition articles into a single document
    all_opp_articles <- paste(opp_articles, collapse = " ")

    # Combine both documents
    o_g_articles <- c(all_gov_articles, all_opp_articles)

    # Convert to a vector source
    o_g_articles <- VectorSource(o_g_articles)

    # Create the combined corpus
    o_g_corpus <- VCorpus(o_g_articles)

    # Clean the corpus
    o_g_clean_corpus <- clean_corpus(o_g_corpus)

    # Create the term-document matrix
    o_g_tdm <- TermDocumentMatrix(o_g_clean_corpus)

    # Convert to a matrix
    o_g_m <- as.matrix(o_g_tdm)

    #--------------------------------------------------------------
    # Give the columns distinct names
    colnames(o_g_m) <- c("Government", "Opposition")
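Since renderPlot() in server.r consumes o_g_m directly, one way to confirm that global.r runs end to end outside Shiny is to source it in a fresh session and inspect the resulting matrix. A minimal sketch, assuming the data files named above (PML.txt, OPPS.txt, news.csv, customStopWords.txt) sit in the working directory:

    # run global.r on its own and check the object the word cloud needs
    source("global.r")
    dim(o_g_m)        # number of terms x 2 groups
    colnames(o_g_m)   # "Government" "Opposition"
    head(sort(rowSums(o_g_m), decreasing = TRUE))  # most frequent terms overall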

Logs

         2017-07-30T17:51:16.404481+00:00 shinyapps[198754]: The following object is                masked from ‘package:qdapRegex’:
         2017-07-30T17:51:16.404482+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.404483+00:00 shinyapps[198754]:     %+%
         2017-07-30T17:51:16.404484+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.567970+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.567972+00:00 shinyapps[198754]: Attaching package: ‘twitteR’
         2017-07-30T17:51:16.567972+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.568624+00:00 shinyapps[198754]: The following objects are masked from ‘package:validate’:
         2017-07-30T17:51:16.568258+00:00 shinyapps[198754]: The following object is masked from ‘package:qdapTools’:
         2017-07-30T17:51:16.568624+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.568625+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.568625+00:00 shinyapps[198754]:     created, description
         2017-07-30T17:51:16.569012+00:00 shinyapps[198754]: The following object is masked from ‘package:plyr’:
         2017-07-30T17:51:16.568258+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.568259+00:00 shinyapps[198754]:     id
         2017-07-30T17:51:16.568259+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.569013+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.569013+00:00 shinyapps[198754]:     id
         2017-07-30T17:51:16.569013+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.646722+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.646726+00:00 shinyapps[198754]: Attaching package: ‘NLP’
         2017-07-30T17:51:16.646727+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.646983+00:00 shinyapps[198754]: The following object is masked from ‘package:ggplot2’:
         2017-07-30T17:51:16.646984+00:00 shinyapps[198754]: 
         2017-07-30T17:51:16.646985+00:00 shinyapps[198754]:     annotate
         2017-07-30T17:51:16.646985+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.786148+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.786151+00:00 shinyapps[198754]: Attaching package: ‘qdap’
         2017-07-30T17:51:17.786152+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.786445+00:00 shinyapps[198754]: The following objects are masked from ‘package:tm’:
         2017-07-30T17:51:17.786446+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.786447+00:00 shinyapps[198754]:     as.DocumentTermMatrix, as.TermDocumentMatrix
         2017-07-30T17:51:17.786447+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.786889+00:00 shinyapps[198754]: The following object is masked from ‘package:NLP’:
         2017-07-30T17:51:17.786890+00:00 shinyapps[198754]:     ngrams
         2017-07-30T17:51:17.786890+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.786891+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.788601+00:00 shinyapps[198754]: The following object is masked from ‘package:base’:
         2017-07-30T17:51:17.788602+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.788603+00:00 shinyapps[198754]:     Filter
         2017-07-30T17:51:17.793538+00:00 shinyapps[198754]: Attaching package: ‘stringr’
         2017-07-30T17:51:17.793537+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.793538+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.793805+00:00 shinyapps[198754]:     %>%
         2017-07-30T17:51:17.788603+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.793803+00:00 shinyapps[198754]: The following object is masked from ‘package:qdap’:
         2017-07-30T17:51:17.793804+00:00 shinyapps[198754]: 
         2017-07-30T17:51:17.793805+00:00 shinyapps[198754]: 
         2017-07-30T17:51:18.496556+00:00 shinyapps[198754]: Error in value[[3L]](cond) : java.lang.NullPointerException
         2017-07-30T17:51:18.496580+00:00 shinyapps[198754]: Calls: local ... tryCatch -> tryCatchList -> tryCatchOne -> <Anonymous>
         2017-07-30T17:51:18.496584+00:00 shinyapps[198754]: Execution halted

I have tried many solutions, but to no avail. The R code works on my Windows machine but not on Shiny Server. Please help, thank you.

I am stuck here. I want to see my word cloud on shinyapps.io.

(The connection attempt fails before the error appears: Error in value[[3L]](cond) : java.lang.NullPointerException; Calls: local ... tryCatch -> tryCatchList -> tryCatchOne -> <Anonymous>; Execution halted)

0 Answers:

There are no answers