Downloading files from URLs in parallel in Python

Date: 2014-01-22 07:25:30

Tags: python

I have some links in a database that I want to download in parallel. I tried doing it sequentially, but it took too much time. I have around 1877 links.

I tried running the downloads in parallel with this code, but it throws an error: failed: 'tuple' object has no attribute 'read'

#!/usr/bin/env python

import urllib
from stream import ThreadPool

URLs = [
  'http://www.cnn.com/',
  'http://www.bbc.co.uk/',
  'http://www.economist.com/',
  'http://nonexistant.website.at.baddomain/',
  'http://slashdot.org/',
  'http://reddit.com/',
  'http://news.ycombinator.com/'
 ]

def retrieve(urls):
    for url in urls:
        print url, ' '
        res = urllib.urlretrieve(url).read()
        yield url, res

if __name__ == '__main__':
    retrieved = URLs >> ThreadPool(retrieve, poolsize=7)
    for url, content in retrieved:
        print '%r is %d bytes' % (url, len(content))
    for url, exception in retrieved.failure:
        print '%r failed: %s' % (url, exception)

I also tried this:

import urllib
import tldextract
from multiprocessing.pool import ThreadPool

URLs = [
  'http://www.cnn.com/',
  'http://www.bbc.co.uk/',
  'http://www.economist.com/',
  'http://nonexistant.website.at.baddomain/',
  'http://slashdot.org/',
  'http://reddit.com/',
  'http://news.ycombinator.com/'
 ]


def dwld(url):
  print url
  res = urllib.urlopen(url).read() 
  filename = tldextract.extract(url)
  with open(filename.domain, 'wb') as fh:
     fh.write(res)
  return url 

pool = ThreadPool(processes = 4)
pool.map(dwld, URLs)

which gives me:

Traceback (most recent call last):
  File "dwld_thread.py", line 26, in <module>
    pool.map(dwld, URLs)
  File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/multiprocessing/pool.py", line 148, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/multiprocessing/pool.py", line 422, in get
    raise self._value
IOError: [Errno socket error] [Errno 8] nodename nor servname provided, or not known

4 Answers:

Answer 0 (score: 2)

I don't know what that stream.ThreadPool you're using is, or what its API is… but the problem is obvious:

res = urllib.urlretrieve(url).read()

If you look at the documentation for urlretrieve:

Return a tuple (filename, headers) where filename is the local file name under which the object can be found…

You obviously can't call read on that. If you want to download to a local file with this legacy API and then read that file, you can:

filename, headers = urllib.urlretrieve(url)
with open(filename) as f:
    res = f.read()

But why? Just use urllib2.urlopen, which "returns a file-like object with two additional methods", so you can call read on it directly, there's no temporary file, and you're not relying on a clunky legacy function that nobody has maintained in years.
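For example, a minimal sketch of your retrieve generator with just that one change, everything else left as in your question:

import urllib2

def retrieve(urls):
    for url in urls:
        print url, ' '
        # urlopen returns a file-like object, so read() works directly
        res = urllib2.urlopen(url).read()
        yield url, res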


Meanwhile, Python has a nice ThreadPoolExecutor built right into the standard library, and if you look at the very first example they show you, it's exactly what you're trying to do.

Unfortunately, you're on Python 2.x, which doesn't have the concurrent.futures module. Fortunately, there is a backport on PyPI that works with 2.5+.

Python also has multiprocessing.dummy.Pool (also available under the undocumented, but probably more readable, name multiprocessing.ThreadPool). But if you're willing to go outside the stdlib for a module you apparently aren't sure how to use and that I've never heard of, I'm guessing you won't have any trouble using futures. So:

import futures
import urllib2

URLs = [
  'http://www.cnn.com/',
  'http://www.bbc.co.uk/',
  'http://www.economist.com/',
  'http://nonexistant.website.at.baddomain/',
  'http://slashdot.org/',
  'http://reddit.com/',
  'http://news.ycombinator.com/'
 ]

def load_url(url):
    return urllib2.urlopen(url).read()

if __name__ == '__main__':
    with futures.ThreadPoolExecutor(max_workers=7) as executor:
        fmap = dict((executor.submit(load_url, url), url) for url in URLs)
        for f in futures.as_completed(fmap):
            url = fmap[f]
            try:
                content = f.result()
            except Exception as exception:
                print '%r failed: %s' % (url, exception)
            else:
                print '%r is %d bytes' % (url, len(content))
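For reference, here is a rough sketch of the same thing with the stdlib multiprocessing.dummy.Pool mentioned above. Exceptions are caught inside the worker here, because Pool.map would otherwise re-raise the first failure and discard the remaining results:

import urllib2
from multiprocessing.dummy import Pool  # thread-backed Pool from the stdlib

URLs = [
  'http://www.cnn.com/',
  'http://www.bbc.co.uk/',
  'http://nonexistant.website.at.baddomain/',
  'http://news.ycombinator.com/'
 ]

def load_url(url):
    # return (url, content, error) so failures don't abort the whole map
    try:
        return url, urllib2.urlopen(url).read(), None
    except Exception as exception:
        return url, None, exception

if __name__ == '__main__':
    pool = Pool(7)  # 7 worker threads, like max_workers=7 above
    for url, content, exception in pool.map(load_url, URLs):
        if exception is not None:
            print '%r failed: %s' % (url, exception)
        else:
            print '%r is %d bytes' % (url, len(content))
    pool.close()
    pool.join()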

Answer 1 (score: 0)

from threading import *
from time import sleep
# if Python2:
import urllib
# if Python3:
# import urllib.request

URLs = [
  'http://www.cnn.com/',
  'http://www.bbc.co.uk/',
  'http://www.economist.com/',
  'http://nonexistant.website.at.baddomain/',
  'http://slashdot.org/',
  'http://reddit.com/',
  'http://news.ycombinator.com/'
 ]

class worker(Thread):
    def __init__(self, link):
        Thread.__init__(self)
        self.link = link
        self.start()
    def run(self):
        # if Python2:
        res = urllib.urlopen(self.link).read() # as mentioned by @DhruvPathak
        # if Python3:
        # res = urllib.request.urlopen(self.link).read()
        filename = self.link.replace('/', '_') # turn the link into a usable filename
        with open(filename, 'wb') as fh:
            fh.write(res) # store fetched data in a file named after <link>

for url in URLs:
    while len(enumerate()) > 500:
        sleep(0.25)
    worker(url)

while len(enumerate()) > 1:
    sleep(0.25) # wait for all threads to finish
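(threading.enumerate() returns the list of all Thread objects currently alive, including the main thread, so the two while loops above cap the number of concurrent workers at roughly 500 and then wait until only the main thread is left.)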

Answer 2 (score: 0)

urllib.urlretrieve(url).read() should be urllib.urlopen(url).read()

Answer 3 (score: 0)

How about using multiprocessing?

Sample code:

#! /usr/bin/env python
# -*- coding: utf-8 -*-


import sys
import urllib
from multiprocessing import Pool

import os

POOL = 8
PDFS_DOWNLOAD_DIR = 'pdfs'
PDF_LINKS = sys.argv[1]


class DownloadFiles(object):
    def __init__(self):
        self.pdf_links = self.read_links_from_file()
        self.create_download_dir()

    def create_download_dir(self):
        try:
            if not os.path.exists(PDFS_DOWNLOAD_DIR):
                os.makedirs(PDFS_DOWNLOAD_DIR)
        except IOError as e:
            exit()

    def read_links_from_file(self):
        try:
            with open(PDF_LINKS, 'r') as f:
                return list(set([x.strip() for x in f]))
        except (IndexError, IOError) as e:
            exit()

    def get_file(self, link):

        filename = link.split('/')[-2]

        print('Downloading file --> "{filename}"'.format(
            filename=filename
        ))

        urllib.urlretrieve(link, filename='{pdfs_data}/{filename}'.format(
            pdfs_data=PDFS_DOWNLOAD_DIR,
            filename=filename
        ))

    def download(self):

        pool = Pool(POOL)
        pool.map(self.get_file, self.pdf_links)

        pool.close()
        pool.join()

        print('\nSuccessfully downloaded files from given source!\n')


d = DownloadFiles()
d.download()
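Assuming the script is saved as, say, download_pdfs.py (the name is arbitrary) and the links are listed one per line in a text file, it would be invoked roughly as python download_pdfs.py links.txt; the downloaded files end up in the pdfs directory created by create_download_dir().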