I want to use Shiny to demonstrate my text-mining analysis workflow,
but I'm confused: Shiny doesn't seem to let me reuse a finished function (or data, data frame, matrix, etc.) across different tabPanels.
Building each subsequent analysis step is really cumbersome for me.
Is there a way to solve this?
Here is the download link for my test analysis file: https://drive.google.com/uc?export=download&id=1A9BtTplgy42etAh42YwLiZJAwl_RSNF-
ui.r
library(shiny)

# Define UI for data upload app ----
navbarPage(
  title = 'Patent Analysis System',

  # Table 1 ----------------------------------------------------
  tabPanel("Upload Patent File", fluidPage(

    # App title ----
    titlePanel("Uploading Files"),

    # Sidebar layout with input and output definitions ----
    sidebarLayout(

      # Sidebar panel for inputs ----
      sidebarPanel(

        # Input: Select a Patent file ----
        fileInput("patent_file", "Choose Patent File",
                  multiple = TRUE,
                  accept = c("text/csv",
                             "text/comma-separated-values,text/plain",
                             ".csv")),

        # Submit button
        submitButton("Update View", icon("refresh"))
      ),

      # Main panel for displaying outputs ----
      mainPanel(
        tabsetPanel(
          tabPanel("Load Files", tableOutput("contents"), id = "load_files"),
          tabPanel("Patents Preprocess",
                   fluidPage(
                     sidebarPanel(
                       selectizeInput("choose_patent", "Choose Patent",
                                      choices = 1:10, selected = 1),
                       submitButton("Update", icon("refresh"))
                     )
                   ),
                   verbatimTextOutput("patent_pre_process")),
          tabPanel("Convert matrix (TF)",
                   tableOutput("mat_tf"), id = "matrix_tf")
        )
      )
    )
  )),

  tabPanel('Process2', DT::dataTableOutput('ex5'))
)
server.r
library(ontologyIndex)
library(magrittr)
library(tidyr)      # separate_rows
library(quanteda)   # tolower
library(SnowballC)  # wordstem
library(gtools)
library(tm)
library(wordcloud)
library(textreg)

server <- function(input, output, session) {
  options(shiny.maxRequestSize = 30*1024^2)

  # 1. File load ------------------------------------------------------------------------
  output$contents <- renderTable({
    req(input$patent_file)
    patent_df <- read.csv(input$patent_file$datapath)
    patent_df
  })

  # 2. Patent preprocess ----------------------------------------------------------------
  output$patent_pre_process <- renderPrint({
    req(input$patent_file)
    df_cmp <- read.csv(input$patent_file$datapath, stringsAsFactors = FALSE)

    # Build Corpus (Title) --------------------------------------------------------------
    df_title <- data.frame(Title = df_cmp$English.title)
    corpus_title <- Corpus(DataframeSource(df_title))

    # Pre-processing and transforming the Corpus
    corpus_tm_title <- tm_map(corpus_title, content_transformer(tolower)) %>%
      tm_map(removeNumbers)

    # Build Corpus (Abstract) -----------------------------------------------------------
    df_abstract <- data.frame(abstract = df_cmp$English.abstract)
    corpus_abstract <- Corpus(DataframeSource(df_abstract))

    # Pre-processing and transforming the Corpus
    corpus_tm_abstract <- tm_map(corpus_abstract, content_transformer(tolower)) %>%
      tm_map(removeNumbers) %>%
      tm_map(stripWhitespace)

    # Patent result output --------------------------------------------------------------
    pat_num <- 1:length(df_cmp$Publication.numbers)
    updateSelectInput(session, "choose_patent", "Choose Patent :", choices = pat_num)
    choose_pat_num <- as.integer(input$choose_patent)
    list(Title    = content(corpus_tm_title[[choose_pat_num]]),
         Abstract = content(corpus_tm_abstract[[choose_pat_num]]))
  })

  # 3. Matrix ---------------------------------------------------------------------------
  output$mat_tf <- renderTable({
    # Title
    # corpus_tm_title was created inside renderPrint() above and is not visible here
    title_convert <- convert.tm.to.character(corpus_tm_title)
    mat_title <- dfm(title_convert, what = "word", remove_punct = FALSE,
                     ngrams = 1:3, concatenator = " ") %>%
      as.matrix() * 3

    # Combine title/abstract/claim dtm --------------------------------------------------
    # mat_abstract is not defined in this scope either
    tf_mat <- cbind(mat_title, mat_abstract)
    tf_mat
  })
}
Answer 0 (score: 0)
Your problem is that you have put all of your computation inside render functions. Those are designed to render output to a single target and nothing else. You really want to put all such computation in reactive expressions and build chains out of them. Below I have reformatted the code in a way that I think should work well (untested).
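As a minimal sketch of the pattern (the row_count output is made up for illustration and is not part of your UI):

library(shiny)

server <- function(input, output, session) {
  # The reactive expression reads the upload once and caches the result;
  # it only recomputes when input$patent_file changes.
  uploaded_data <- reactive({
    req(input$patent_file)
    read.csv(input$patent_file$datapath, stringsAsFactors = FALSE)
  })

  # Any render function, in any tabPanel, can share the same object by calling it.
  output$contents <- renderTable({
    uploaded_data()
  })
  output$row_count <- renderPrint({
    nrow(uploaded_data())
  })
}

Because the reactive is cached, each tab reuses the same data frame instead of re-reading the file.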
library(ontologyIndex)
library(magrittr)
library(tidyr)      # separate_rows
library(quanteda)   # tolower
library(SnowballC)  # wordstem
library(gtools)
library(tm)
library(wordcloud)
library(textreg)

server <- function(input, output, session) {
  options(shiny.maxRequestSize = 30*1024^2)

  # 1. File load ------------------------------------------------------------------------
  patent_df <- reactive({
    req(input$patent_file)
    read.csv(input$patent_file$datapath, stringsAsFactors = FALSE)
  })

  output$contents <- renderTable({
    patent_df()
  })

  # 2. Patent preprocess ----------------------------------------------------------------
  preProcess <- reactive({
    df_cmp <- patent_df()

    # Build Corpus (Title) --------------------------------------------------------------
    df_title <- data.frame(Title = df_cmp$English.title)
    corpus_title <- Corpus(DataframeSource(df_title))

    # Pre-processing and transforming the Corpus
    corpus_tm_title <- tm_map(corpus_title, content_transformer(tolower)) %>%
      tm_map(removeNumbers)
    corpus_tm_title
  })

  dtaCorpus <- reactive({
    # Build Corpus (Abstract) -----------------------------------------------------------
    df_cmp <- patent_df()
    corpus_tm_title <- preProcess()

    df_abstract <- data.frame(abstract = df_cmp$English.abstract)
    corpus_abstract <- Corpus(DataframeSource(df_abstract))

    # Pre-processing and transforming the Corpus
    corpus_tm_abstract <- tm_map(corpus_abstract, content_transformer(tolower)) %>%
      tm_map(removeNumbers) %>%
      tm_map(stripWhitespace)

    # Patent result output --------------------------------------------------------------
    pat_num <- 1:length(df_cmp$Publication.numbers)
    updateSelectInput(session, "choose_patent", "Choose Patent :", choices = pat_num)
    choose_pat_num <- as.integer(input$choose_patent)
    list(Title    = content(corpus_tm_title[[choose_pat_num]]),
         Abstract = content(corpus_tm_abstract[[choose_pat_num]]))
  })

  output$patent_pre_process <- renderPrint({
    dtaCorpus()
  })

  # 3. Matrix ---------------------------------------------------------------------------
  output$mat_tf <- renderTable({
    corpus_tm_title <- preProcess()

    # Title
    title_convert <- convert.tm.to.character(corpus_tm_title)
    mat_title <- dfm(title_convert, what = "word", remove_punct = FALSE,
                     ngrams = 1:3, concatenator = " ") %>%
      as.matrix() * 3

    # Combine title/abstract/claim dtm --------------------------------------------------
    # NOTE: mat_abstract is still undefined; it would need its own reactive built from
    # the abstract corpus in the same way as mat_title.
    tf_mat <- cbind(mat_title, mat_abstract)
    tf_mat
  })
}
Hope this helps.