我想从 https://www.pixiv.net/ 下载图像,所以有点麻烦。必须登录才能开始从页面上抓取详细信息。当我尝试用 urllib.request.urlretrieve 检索它们时,出现 403 Forbidden 错误。我在网上搜索了其他方法,但它们始终以 403 Forbidden 错误结束。
这是我要抓取的页面示例, https://www.pixiv.net/member_illust.php?mode=medium&illust_id=71751889
要开始抓取,甚至必须登录,如果不登录,您将找不到必要的元素。
import os
import time
import urllib.request

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
def login(browser):
    """Open pixiv's login form and submit the (hard-coded) credentials.

    Expects `browser` to be on the pixiv landing page, where a link with
    the visible text 'Login' is present.
    """
    browser.find_element_by_link_text('Login').click()
    user_box = browser.find_element_by_xpath(
        "//*[@id='LoginComponent']/form/div[1]/div[1]/input")
    user_box.send_keys('')  # input username
    pass_box = browser.find_element_by_xpath(
        "//*[@id='LoginComponent']/form/div[1]/div[2]/input")
    pass_box.send_keys('')  # input password
    submit = browser.find_elements_by_tag_name('button')[1]
    time.sleep(1)
    submit.click()
def search(browser):
    """Type the search tag into the suggest box and open the first result."""
    time.sleep(1)
    box = browser.find_element_by_id('suggest-input')
    box.send_keys('toyosatomimi no miko')
    box.send_keys(Keys.ENTER)
    thumbnail = browser.find_element_by_class_name('_25taFA4')
    thumbnail.click()
def get_soup(browser):
    """Return a BeautifulSoup (lxml parser) of the browser's current page."""
    html = browser.page_source
    return BeautifulSoup(html, 'lxml')
def download_image(soup, file_path):
    """Download the artwork image linked from the current page.

    soup:      BeautifulSoup of the rendered page; the image URL is read
               from the second <a target="_blank"> anchor.
    file_path: destination directory for the downloaded .jpg.

    Raises urllib.error.HTTPError if the image host rejects the request.
    """
    url = soup.find_all('a', {'target': '_blank'})[1].get('href')
    file_name = 'image'
    # BUG FIX: the original concatenated directory + file name without a
    # separator, producing e.g. 'D:\instagram_photosimage.jpg'.
    full_path = os.path.join(file_path, file_name + '.jpg')
    # pixiv's image host rejects header-less requests (the 403 Forbidden
    # in the traceback) -- send a Referer and a User-Agent. NOTE(review):
    # the logged-in session cookies may also be required; see the answer
    # below which copies them from the Selenium session.
    opener = urllib.request.build_opener()
    opener.addheaders = [('Referer', 'https://www.pixiv.net/'),
                         ('User-Agent', 'Mozilla/5.0')]
    # urlretrieve() only honors these headers once the opener is installed.
    urllib.request.install_opener(opener)
    urllib.request.urlretrieve(url, full_path)
url = "https://www.pixiv.net/"
# Path to the local chromedriver binary. (The raw string plus doubled
# backslashes is redundant -- the literal actually contains '\\' -- but
# Windows tolerates doubled separators, so it still works.)
browser = webdriver.Chrome(r'D:\\chromedriver_win32\\chromedriver.exe')
browser.get(url)
login(browser)
search(browser)
soup = get_soup(browser)
# NOTE(review): navigating back to the home page here replaces the artwork
# page, so the soup re-parsed below is of the HOME page, not the search
# result -- presumably unintended; confirm which page the image link
# should be scraped from.
browser.get(url)
soup = get_soup(browser)
download_image(soup, 'D:\\instagram_photos')
Traceback (most recent call last):
File "D:/pixiv scraper/venv/pixiv scrape.py", line 95, in <module>
download_image(soup, 'D:\\instagram_photos')
File "D:/pixiv scraper/venv/pixiv scrape.py", line 57, in download_image
urllib.request.urlretrieve(url,full_path)
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 247, in urlretrieve
with contextlib.closing(urlopen(url, data)) as fp:
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 531, in open
response = meth(req, response)
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 641, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 569, in error
return self._call_chain(*args)
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 503, in _call_chain
result = func(*args)
File "C:\Users\HP\AppData\Local\Programs\Python\Python37-
32\lib\urllib\request.py", line 649, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
这是我的代码,我想知道是否有人可以提供帮助?
答案 0(得分:0)
.urlretrieve() 没有像 Selenium 浏览器会话中那样的 cookie 或会话(这就是出现 403 的原因),并且还需要设置 User-Agent(用户代理)。
def download_image(browser, file_path):
    """Download the artwork image using the logged-in browser's identity.

    Copies the Selenium session's User-Agent and cookies into a urllib
    opener so the image host accepts the request instead of answering
    403 Forbidden.

    browser:   a logged-in Selenium WebDriver currently on the artwork page.
    file_path: destination directory for the downloaded .jpg.
    """
    userAgent = browser.execute_script("return navigator.userAgent;")
    seleniumCookies = browser.get_cookies()
    cookies = ''
    for cookie in seleniumCookies:
        cookies += '%s=%s;' % (cookie['name'], cookie['value'])
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', userAgent)]
    opener.addheaders.append(('Cookie', cookies))
    # BUG FIX: without install_opener(), urlretrieve() ignores the opener
    # built above and sends a bare request -- the 403 persists.
    urllib.request.install_opener(opener)
    soup = get_soup(browser)
    url = soup.find_all('a', {'target': '_blank'})[1].get('href')
    file_name = 'image'
    # BUG FIX: join with a separator; plain concatenation produced
    # 'D:\instagram_photosimage.jpg'.
    full_path = os.path.join(file_path, file_name + '.jpg')
    urllib.request.urlretrieve(url, full_path)
url = "https://www.pixiv.net/"
# Same driver setup as the question's script; only download_image changed.
browser = webdriver.Chrome(r'D:\\chromedriver_win32\\chromedriver.exe')
browser.get(url)
login(browser)
search(browser)
# you may need to WebDriverWait until search result appear
# download_image now receives the browser itself so it can reuse the
# session's cookies and User-Agent for the image request.
download_image(browser, 'D:\\instagram_photos')