I need to parse input like this: "+ 704"
into: switcher = '+' and c = 704.
I have:
scanf("%c %d", &switcher, &c);
This doesn't work.
scanf returns 1 instead of 2, c = 4196080, and printf("%c", switcher) prints a newline.
What am I missing?
Answer 0 (score: 0):
Basically, there is a newline waiting in the input buffer. To get rid of it, just add a space at the beginning of the format string.
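A minimal sketch of the fix, reusing the variable names from the question (the main() wrapper and the return-value check are added here only for illustration):

#include <stdio.h>

int main(void) {
    char switcher;
    int c;

    /* The leading space in " %c %d" tells scanf to skip any pending
       whitespace, including a newline left over in the buffer from a
       previous read, before matching %c. */
    if (scanf(" %c %d", &switcher, &c) == 2) {
        printf("switcher = %c, c = %d\n", switcher, c);
    }
    return 0;
}

With the extra space, input such as "+ 704" (even when preceded by a stray newline) yields switcher == '+' and c == 704, and scanf returns 2.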
library(shiny)
library(base64enc)
library(httr)
library(rjson)
library(bit64)
library(plyr)
library(stringr)
library(urltools)
library(twitteR)
library(ggplot2)
# Twitter API credentials
consumerKey <- 'MUwukLPqlXp7pROuci6NGHiIo'
consumerSecret <- 'yCucMwTsjMsXFrrKDnHTyawxgKFzzRMGy3UeEyzGGvlQXzIHAV'
tokenKey <- '4095349632-uOfSHrUmPzbuTYDRkASHfUuJBy0YxCesY6QtWbe'
tokenSecret <- 'FS55U9PqOIBTCo3mbV3bHli7aEXMJEGmGjPlpfrifGeTL'
setup_twitter_oauth(consumerKey, consumerSecret, tokenKey, tokenSecret)
pos <- scan('res/positive-words.txt', what='character', comment.char=';')
neg <- scan('res/negative-words.txt', what='character', comment.char=';')
ui <- fluidPage(
  sliderInput(inputId = "nTweets",
              label = "Choose a number of tweets to gather",
              value = 10, min = 1, max = 100),
  textInput(inputId = "term", label = "Enter a hashtag or term to search:", value = "Canada"),
  actionButton(inputId = "go", label = "Go/Refresh"),
  plotOutput("LHist")
)
server <- function(input, output) {
  rv <- reactiveValues()
  title <- eventReactive(input$go, {
    paste("sentiment on", rv$searchTerm, sep = " ")
  })
  tweets <- eventReactive(input$go, {
    searchTwitter(rv$searchTerm, n = rv$Tweets, lang = "en")
  })
  # tweets and Tweets.text are reactives, so they must be called as functions
  Tweets.text <- eventReactive(input$go, {
    laply(tweets(), function(t) t$getText())
  })
  analysis <- eventReactive(input$go, {
    score.sentiment(Tweets.text(), pos, neg)
  })
  observeEvent(input$go, {
    rv$searchTerm <- input$term
    rv$Tweets <- input$nTweets
  })
  output$LHist <- renderPlot({
    # histogram of sentiment scores (geom_histogram, since geom_bar ignores binwidth)
    ggplot(data = analysis()) +
      geom_histogram(mapping = aes(x = score), binwidth = 1) +
      theme_bw() + scale_color_brewer() +
      ggtitle(paste("Lexical analysis:", title()))
  })
}
# score.sentiment() must be defined before the app launches, so shinyApp() is
# kept as the last expression of the script.
score.sentiment = function(sentences, pos.words, neg.words, .progress = 'none')
{
  require(plyr)
  require(stringr)
  scores = laply(sentences, function(sentence, pos.words, neg.words) {
    # strip punctuation, control characters and digits
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    # remove retweet entities
    sentence = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", sentence)
    # remove @mentions and links
    sentence = gsub("@\\w+", "", sentence)
    sentence = gsub("http\\w+", "", sentence)
    # split into words and score: (# positive matches) - (# negative matches)
    word.list = str_split(sentence, '\\s+')
    words = unlist(word.list)
    pos.matches = !is.na(match(words, pos.words))
    neg.matches = !is.na(match(words, neg.words))
    score = sum(pos.matches) - sum(neg.matches)
    return(score)
  }, pos.words, neg.words, .progress = .progress)
  scores.df = data.frame(score = scores, text = sentences)
  return(scores.df)
}

shinyApp(ui = ui, server = server)