在尝试提取URL时使用Urllibopener时引发HTTP错误

时间:2018-05-06 22:25:02

标签: python selenium web-scraping

我正在尝试构建一个为Mazda Miata擦除Craigslist的脚本。我在函数" extract_post_url"中收到错误当它试图请求时。这是我试图遵循的教程: https://github.com/vprusso/youtube_tutorials/blob/master/web_scraping_and_automation/selenium/craigstlist_scraper.py

到目前为止,这是代码:

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from bs4 import BeautifulSoup
import urllib.request 

class CraigslistScaper(object): 
    def __init__(self,query,location,max_price,transmission): 
        self.query = query
#        self.sort=sort
        self.location = location
#        self.postal = postal
        self.max_price = max_price
        self.transmission = auto_transmission


#https://sfbay.craigslist.org/search/cta?query=mazda+miata&sort=rel&max_price=6000&auto_transmission=1
        self.url = f"https://{location}.craigslist.org/search/cta?query={query}&sort=rel&max_price={max_price}&auto_transmission={transmission}"

        self.driver = webdriver.Chrome('/Users/MyLaptop/Desktop/chromedriver')
        self.delay = 5

    def load_craigslist_url(self): 
        self.driver.get(self.url)
        try:
            wait = WebDriverWait(self.driver, self.delay)
            wait.until(EC.presence_of_element_located((By.ID,"searchform")))              
            print("page is ready")
        except TimeoutError: 
            print('Loading took too much time')

    def extract_post_titles(self): 
        all_posts = self.driver.find_elements_by_class_name('result-row')
        post_titles_list=[]
        for post in all_posts: 
            print(post.text)
            post_titles_list.append(post.text)

    def extract_post_urls(self): 
        url_list = []
#        req = Request(self.url)
        html_page = urllib.request.urlopen(self.url)
        soup = BeautifulSoup(html_page,'lxml')
        for link in soup.findAll("a ", {"class": "result-title hrdlnk"}):
            print(link["href"])
            url_list.append(["href"])
            return url_list

    def quit(self): 
        self.driver.close()

location = "sfbay" 
#postal = "94519" 
max_price = "5000"
#radius = "250"
auto_transmission = 1
query = "Mazda Miata"

scraper = CraigslistScaper(query,location,max_price,auto_transmission)        

scraper.load_craigslist_url()
scraper.extract_post_titles()
scraper.extract_post_urls()
scraper.quit()

这是我得到的错误:

File "<ipython-input-2-edb38e647dc0>", line 1, in <module>
    runfile('/Users/MyLaptop/.spyder-py3/CraigslistScraper', wdir='/Users/MohitAsthana/.spyder-py3')

  File "/anaconda3/lib/python3.6/site-packages/spyder/utils/site/sitecustomize.py", line 705, in runfile
    execfile(filename, namespace)

  File "/anaconda3/lib/python3.6/site-packages/spyder/utils/site/sitecustomize.py", line 102, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)

  File "/Users/MyLaptop/.spyder-py3/CraigslistScraper", line 73, in <module>
    scraper.extract_post_urls()

  File "/Users/MyLaptop/.spyder-py3/CraigslistScraper", line 52, in extract_post_urls
    html_page = urllib.request.urlopen(req)

  File "/anaconda3/lib/python3.6/urllib/request.py", line 223, in urlopen
    return opener.open(url, data, timeout)

  File "/anaconda3/lib/python3.6/urllib/request.py", line 532, in open
    response = meth(req, response)

  File "/anaconda3/lib/python3.6/urllib/request.py", line 642, in http_response
    'http', request, response, code, msg, hdrs)

  File "/anaconda3/lib/python3.6/urllib/request.py", line 570, in error
    return self._call_chain(*args)

  File "/anaconda3/lib/python3.6/urllib/request.py", line 504, in _call_chain
    result = func(*args)

  File "/anaconda3/lib/python3.6/urllib/request.py", line 650, in http_error_default
    raise HTTPError(req.full_url, code, msg, hdrs, fp)

HTTPError: Bad Request

Chrome会打开正确的网址,但在我下载网址文件时会收到错误消息。

0 个答案:

没有答案