Run a script on a list of files in a directory in R

Asked: 2013-12-21 16:45:13

Tags: r pdf directory text-mining tm

I have a program that converts a pdf file to a txt file using R. How can I apply it to a whole directory of pdf files that I want to convert to txt?

Here is the code I have so far, which only works on a single URL pointing to a pdf document:

# download pdftotext from 
# ftp://ftp.foolabs.com/pub/xpdf/xpdfbin-win-3.03.zip
# and extract to your program files folder

# here is a pdf for mining
url <- "http://www.noisyroom.net/blog/RomneySpeech072912.pdf"
dest <- tempfile(fileext = ".pdf")
download.file(url, dest, mode = "wb")

# set path to pdftotext.exe and convert pdf to text;
# wait = TRUE so the conversion finishes before we read the result
exe <- "C:\\Program Files\\xpdfbin-win-3.03\\bin32\\pdftotext.exe"
system(paste("\"", exe, "\" \"", dest, "\"", sep = ""), wait = TRUE)

# get the txt-file name and open it (a single shell.exec now works;
# the asynchronous call was why the first try used to throw an error)
filetxt <- sub("\\.pdf$", ".txt", dest)
shell.exec(filetxt)


# do something with it, e.g. a simple word cloud 
library(tm)
library(wordcloud)
library(Rstem)

txt <- readLines(filetxt) # a warning about an incomplete final line is harmless

txt <- tolower(txt)
txt <- removeWords(txt, c("\\f", stopwords()))  # "\\f" matches the form feeds pdftotext inserts between pages

# each line of txt becomes a separate document in the corpus
corpus <- Corpus(VectorSource(txt))
corpus <- tm_map(corpus, removePunctuation)
tdm <- TermDocumentMatrix(corpus)
m <- as.matrix(tdm)
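# rowSums gives each term's total frequency across all the lines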
d <- data.frame(freq = sort(rowSums(m), decreasing = TRUE))

# Stem words
d$stem <- wordStem(row.names(d), language = "english")

# and put the words into a column, otherwise they would be lost when aggregating
d$word <- row.names(d)

# drop very long strings such as web addresses:
d <- d[nchar(row.names(d)) < 20, ]

# aggregate frequency by word stem and
# keep the first word for each stem
agg_freq <- aggregate(freq ~ stem, data = d, sum)
agg_word <- aggregate(word ~ stem, data = d, function(x) x[1])

d <- cbind(freq = agg_freq[, 2], agg_word)

# sort by frequency
d <- d[order(d$freq, decreasing = TRUE), ]

# print wordcloud:
wordcloud(d$word, d$freq)

# remove temporary files
file.remove(dir(tempdir(), full.names = TRUE))

1 Answer:

Answer 0 (score: 4):

If you have a list (really, a vector) of URLs for the files you are trying to process, you can turn your procedure into a function and apply that function to each URL. Try something along these lines:

crawlPDFs <- function(x) {
  # x is a character string giving the url of a pdf on the web
  url <- x
  dest <- tempfile(fileext = ".pdf")
  download.file(url, dest, mode = "wb")

  # set path to pdftotext.exe and convert pdf to text;
  # wait = TRUE so the conversion finishes before we read the result
  exe <- "C:\\Program Files\\xpdfbin-win-3.03\\bin32\\pdftotext.exe"
  system(paste("\"", exe, "\" \"", dest, "\"", sep = ""), wait = TRUE)

  # get the txt-file name and open it (a single shell.exec now works,
  # because the blocking call above guarantees the file exists)
  filetxt <- sub("\\.pdf$", ".txt", dest)
  shell.exec(filetxt)


  # do something with it, e.g. a simple word cloud 
  library(tm)
  library(wordcloud)
  library(Rstem)

  txt <- readLines(filetxt) # a warning about an incomplete final line is harmless

  txt <- tolower(txt)
  txt <- removeWords(txt, c("\\f", stopwords()))  # "\\f" matches the form feeds pdftotext inserts between pages

  corpus <- Corpus(VectorSource(txt))
  corpus <- tm_map(corpus, removePunctuation)
  tdm <- TermDocumentMatrix(corpus)
  m <- as.matrix(tdm)
  d <- data.frame(freq = sort(rowSums(m), decreasing = TRUE))

  # Stem words
  d$stem <- wordStem(row.names(d), language = "english")

  # and put the words into a column, otherwise they would be lost when aggregating
  d$word <- row.names(d)

  # drop very long strings such as web addresses:
  d <- d[nchar(row.names(d)) < 20, ]

  # aggregate frequency by word stem and
  # keep the first word for each stem
  agg_freq <- aggregate(freq ~ stem, data = d, sum)
  agg_word <- aggregate(word ~ stem, data = d, function(x) x[1])

  d <- cbind(freq = agg_freq[, 2], agg_word)

  # sort by frequency
  d <- d[order(d$freq, decreasing = TRUE), ]

  # print wordcloud:
  wordcloud(d$word, d$freq)

  # remove temporary files (note: this clears everything in tempdir())
  file.remove(dir(tempdir(), full.names = TRUE))
}

sapply(list.of.urls, FUN = crawlPDFs) 

list.of.urls can be a character vector, or a list in which each element is a character string giving the URL of a pdf.
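
For example, with a small vector of pdf URLs (the second URL is just a placeholder):

list.of.urls <- c("http://www.noisyroom.net/blog/RomneySpeech072912.pdf",
                  "http://www.example.com/another.pdf")
sapply(list.of.urls, FUN = crawlPDFs)

Since the question itself asks about pdfs that already sit in a local directory, here is a minimal sketch of that variant. It assumes the same xpdf install path as above; the folder "C:/pdfs" and the helper pdfToTxt are illustrative names. It skips the download step and loops over list.files():

# convert one local pdf and return the path of the resulting txt file
pdfToTxt <- function(pdf) {
  exe <- "C:\\Program Files\\xpdfbin-win-3.03\\bin32\\pdftotext.exe"
  system(paste("\"", exe, "\" \"", pdf, "\"", sep = ""), wait = TRUE)
  sub("\\.pdf$", ".txt", pdf)  # pdftotext writes the txt next to the pdf
}

pdfs <- list.files("C:/pdfs", pattern = "\\.pdf$", full.names = TRUE)
txts <- sapply(pdfs, pdfToTxt)

The resulting txt paths can then be fed into the same tm pipeline shown above.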