I am trying to use Scrapy to get a list of websites from a search engine, based on keywords I have in a file.
Here is the error output from Scrapy:
Redirecting (301) to <GET https://duckduckgo.com/?q=> from <GET https://www.duckduckgo.com/?q=>
2014-07-18 16:23:39-0500 [wnd] DEBUG: Crawled (200) <GET https://duckduckgo.com/?q=> (referer: None)
Here is the code:
import re
import os
import sys
import json

from scrapy.spider import Spider
from scrapy.selector import Selector

searchstrings = "wnd.config"
searchoutcome = "searchResults.json"

class wndSpider(Spider):
    name = "wnd"
    allowed_domains = ['google.com']
    url_prefix = []
    #start_urls = ['https://www.google.com/search?q=']
    start_urls = ['https://www.duckduckgo.com/?q=']
    for line in open(searchstrings, 'r').readlines():
        url_prefix = start_urls[0] + line
        #url = url_prefix[0] + line
        #f = open(searchstrings
        #start_urls = [url_prefix]
        #for f in f.readlines():
        #f.close()

    def parse(self, response):
        sel = Selector(response)
        goog_search_list = sel.xpath('//h3/a/@href').extract()
        #goog_search_list = [re.search('q=(.*&sa',n).group(1) for n in goog_search_list]
        #if re.search('q=(.*)&sa',n)]
        #title = sel.xpath('//title/text()').extract()
        #if len(title)>0: title = title[0]
        #contents = sel.xpath('/html/head/meta[@name="description"]/@content').extract()
        #if len(contents)>0: contents = contents[0]

        ## dump output
        #with open(searchoutcome, "w") as outfile:
        #    json.dump(searchoutcome, outfile, indent=4)
Answer 0 (score: 1)
You need to append each url to start_urls inside the for loop. As written, the loop only reassigns url_prefix and never changes start_urls, which is why the spider crawls the bare https://duckduckgo.com/?q= shown in the log.
start_urls = []
base_url = 'https://www.duckduckgo.com/?q='
for line in open(searchstrings, 'r'):
    # strip the trailing newline before appending the keyword to the query URL
    url = base_url + line.strip()
    start_urls.append(url)
If your keywords contain special characters, try urllib.urlencode.
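A minimal sketch of how that could look, assuming Python 2 to match the scrapy.spider import above (on Python 3 the same function is urllib.parse.urlencode); the wnd.config filename and the DuckDuckGo base URL are taken from the question:

# Sketch only: build start_urls from the keyword file with URL-encoded queries.
# Assumes Python 2; on Python 3 use urllib.parse.urlencode instead.
import urllib

searchstrings = "wnd.config"
base_url = 'https://duckduckgo.com/?'

start_urls = []
with open(searchstrings, 'r') as f:
    for line in f:
        keyword = line.strip()
        if not keyword:
            continue
        # urlencode escapes spaces, '&', '=', etc. so the whole keyword
        # survives as the value of the q parameter
        start_urls.append(base_url + urllib.urlencode({'q': keyword}))

Starting from https://duckduckgo.com/ (without the www prefix) also avoids the 301 redirect shown in the log.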