I want to download a number of .txt files. I have a data frame 'New_test' in which the URLs are under 'url' and the target file names are under 'code'. This is "New_test.txt":
"url" "code"
"1" "http://documents.worldbank.org/curated/en/704931468739539459/text/multi-page.txt" "704931468739539459.txt"
"2" "http://documents.worldbank.org/curated/en/239491468743788559/text/multi-page.txt" "239491468743788559.txt"
"3" "http://documents.worldbank.org/curated/en/489381468771867920/text/multi-page.txt" "489381468771867920.txt"
"4" "http://documents.worldbank.org/curated/en/663271468778456388/text/multi-page.txt" "663271468778456388.txt"
"5" "http://documents.worldbank.org/curated/en/330661468742793711/text/multi-page.txt" "330661468742793711.txt"
"6" "http://documents.worldbank.org/curated/en/120441468766519490/text/multi-page.txt" "120441468766519490.txt"
"7" "http://documents.worldbank.org/curated/en/901481468770727038/text/multi-page.txt" "901481468770727038.txt"
"8" "http://documents.worldbank.org/curated/en/172351468740162422/text/multi-page.txt" "172351468740162422.txt"
"9" "http://documents.worldbank.org/curated/en/980401468740176249/text/multi-page.txt" "980401468740176249.txt"
"10" "http://documents.worldbank.org/curated/en/166921468759906515/text/multi-page.txt" "166921468759906515.txt"
"11" "http://documents.worldbank.org/curated/en/681071468781809792/text/DRD169.txt" "681071468781809792.txt"
"12" "http://documents.worldbank.org/curated/en/358291468739333041/text/multi-page.txt" "358291468739333041.txt"
"13" "http://documents.worldbank.org/curated/en/716041468759870921/text/multi0page.txt" "716041468759870921.txt"
"14" "http://documents.worldbank.org/curated/en/961101468763752879/text/34896.txt" "961101468763752879.txt"`
This is the script:
rm(list=ls())
require(quanteda)
library(stringr)
workingdir <-setwd("~/Study/Master/Thesis/Mining/R/WorldBankDownl")
test <- read.csv(paste0(workingdir,"/New_test.txt"), header = TRUE,
stringsAsFactors = FALSE, sep="\t")
#Loop through every url in test_df and download in target directory with name = code
for (url in test) {
    print(head(url))
    print(head(test$code))
    destfile <- paste0('~/Study/Master/Thesis/Mining/R/WorldBankDownl/Sources/', test$code)
    download.file(test$url, destfile, method = "wget", quiet=TRUE)
}
This is the error I get:
Error in download.file(test$url, destfile, method = "wget", quiet = TRUE) :
'url' must be a length-one character vector
Answer 0 (score: 0)
Here is a simpler approach. Where I use txturls below, you would use your test$url (both are character vectors of URLs to the text files).
txturls <- c("http://documents.worldbank.org/curated/en/704931468739539459/text/multi-page.txt",
"http://documents.worldbank.org/curated/en/239491468743788559/text/multi-page.txt",
"http://documents.worldbank.org/curated/en/489381468771867920/text/multi-page.txt")
library("quanteda")
txt <- character()
for (i in txturls) {
    # read the file from the URL
    temp <- readLines(url(i))
    # concatenate lines into one text
    temp <- texts(temp, groups = 1)
    # remove form feed characters
    temp <- gsub("\\f", "", temp)
    # concatenate into the vector
    txt <- c(txt, temp)
}
# form the quanteda corpus
urlcorp <- corpus(txt, docvars = data.frame(source = txturls, stringsAsFactors = FALSE))
summary(urlcorp)
# Corpus consisting of 3 documents:
#
# Text Types Tokens Sentences source
# 1 1343 5125 135 http://documents.worldbank.org/curated/en/704931468739539459/text/multi-page.txt
# 1.1 1343 5125 135 http://documents.worldbank.org/curated/en/239491468743788559/text/multi-page.txt
# 1.2 1343 5125 135 http://documents.worldbank.org/curated/en/489381468771867920/text/multi-page.txt
#
# Source: /Users/kbenoit/Dropbox (Personal)/GitHub/dictionaries_paper/* on x86_64 by kbenoit
# Created: Tue Mar 20 15:51:05 2018
# Notes:
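If the goal is also to save each downloaded file to disk under the name in the code column, the same loop can write the cleaned text out with writeLines. A minimal sketch, assuming the test data frame from the question and an existing Sources/ subdirectory in the working directory:
for (i in seq_len(nrow(test))) {
    temp <- readLines(url(test$url[i]))
    # strip form feed characters, as in the corpus example above
    temp <- gsub("\\f", "", temp)
    # save under the name given in the 'code' column
    writeLines(temp, file.path("Sources", test$code[i]))
}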
Answer 1 (score: 0)
Everyone, thank you for helping me. For me the solution was to change the method I used. 'wget' requires a single URL in 'url' and the same for 'destfile' (the "length-one character vector" in the error), but both my 'url' and 'destfile' were character vectors of length fourteen. I now use the 'libcurl' method, which accepts character vectors of equal length for 'url' and 'destfile'. If you use this method, make sure to set quiet = TRUE.
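To illustrate the difference (a sketch assuming the same test data frame, not a verbatim part of my script): 'wget' needs one call per file, while 'libcurl' accepts equal-length vectors and downloads them in a single call.
# one URL per call: methods like 'wget' require length-one arguments
for (i in seq_len(nrow(test))) {
    download.file(test$url[i], destfile = test$code[i], method = "wget", quiet = TRUE)
}
# equal-length vectors in one call: method 'libcurl' downloads them all at once
download.file(test$url, destfile = test$code, method = "libcurl", quiet = TRUE)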
Also, you may have a working loop and still get this error:
Error in download.file(test$url, destfile, method = "libcurl", quiet = TRUE) :
cannot download any files
In addition: There were 50 or more warnings (use warnings() to see the first 50)
This means that the source cannot keep up with your calls and you are essentially DDoSing it, so the loop has to be slowed down (see the sketch after the script below).
rm(list=ls())
require(quanteda)
library(stringr)
workingdir <-setwd("~/Study/Master/Thesis/Mining/R/WorldBankDownl")
test <- read.csv(paste0(workingdir,"/New_test.txt"), header = TRUE,
stringsAsFactors = FALSE, sep="\t")
test <- data.frame(test)
#Loop through every url in test and download into the target directory with name = code; if you get an error and no files are downloaded, try slowing the loop down.
for (url in test) {
    print(head(url))
    destfile <- paste0(workingdir, '/Sources/WB_', test$code)
    # with method = "libcurl", download.file takes the whole url and destfile vectors at once
    download.file(test$url, destfile, method = "libcurl", quiet = TRUE)
}
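If the server still refuses connections, one way to slow things down (a sketch assuming the same test data frame and workingdir; the one-second pause is an arbitrary choice) is to download the files one at a time with a pause between requests:
for (i in seq_len(nrow(test))) {
    destfile <- paste0(workingdir, '/Sources/WB_', test$code[i])
    download.file(test$url[i], destfile, method = "libcurl", quiet = TRUE)
    # wait one second between requests so the server can keep up
    Sys.sleep(1)
}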