I made a script to scrape a website for all of the product information in a specific category, but my code returns 500,000+ rows when that category only has about 3,000 items.
I'm also new to Python, so any help is appreciated.
The code is attached below:
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 20:31:23 2019
@author:
"""
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
import selenium.webdriver.support.expected_conditions as EC
from bs4 import BeautifulSoup
import os, sys
import time
from urllib.parse import urljoin
import pandas as pd
import re
import numpy as np
# base set up
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
os.chdir("C:/Users/user/desktop/scripts/python")
cwd = os.getcwd()
main_dir = os.path.abspath(os.path.join(cwd, os.pardir))
print('Main Directory:', main_dir)
chromedriver = ("C:/Users/user/desktop/scripts/python/chromedriver.exe")
os.environ["webdriver.chrome.driver"] = chromedriver
# browser = webdriver.Chrome(options=options, executable_path=chromedriver)
mainurl = "https://www.bunnings.com.au/our-range"
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
page = requests.get(mainurl, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
# script start
subcat = []
for item in soup.findAll('ul', attrs={'class': 'chalkboard-menu'}):
    links = item.find_all('a')
    for link in links:
        subcat.append(urljoin(mainurl, link.get("href")))
subcat
result = pd.DataFrame()
for adrs in subcat[0:1]:
    # headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
    # page = requests.get(adrs, headers=headers)
    # soup = BeautifulSoup(page.content, 'html.parser')
    # pagelink = adrs
    # adrs="https://www.bunnings.com.au/our-range/storage-cleaning/cleaning/brushware-mops/indoor-brooms"
    catProd = pd.DataFrame()
    url = adrs
    browser = webdriver.Chrome(options=options, executable_path=chromedriver)
    browser.get(url)
    lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
    match = False
    while (match == False):
        lastCount = lenOfPage
        time.sleep(3)
        lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
        if lastCount == lenOfPage:
            match = True
    reached = False
    while (reached == False):
        try:
            browser.find_element_by_css_selector('#MoreProductsButton > span').click()
            lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
            match = True
            while (match == True):
                lastCount = lenOfPage
                time.sleep(3)
                lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
                if lastCount == lenOfPage:
                    match = True
                    browser.find_element_by_css_selector('#content-layout_inside-anchor > div.search-result__content > div > div > section > div:nth-child(4) > div > div:nth-child(2) > div > button > div.view-more_btn_text').click()
        except:
            reached = True
    # grab the items
    page = browser.page_source
    soup = BeautifulSoup(page, 'html.parser')
    browser.close()
    for article in soup.findAll('article', attrs={'class': 'product-list__item hproduct special-order-product'}):
        for product in article.findAll('img', attrs={'class': 'photo'}):
            pName = product['alt']
            pCat = adrs
            pID = article['data-product-id']
            temp = pd.DataFrame({'proID': [pID], 'Product': [pName], 'Category': [pCat]})
            catProd = catProd.append(temp)
            result = result.append(catProd)
    time.sleep(3)
result.head()
#writes to CSV
writer = pd.ExcelWriter('test123123.xlsx')
result.to_excel(writer,'Sheet1')
writer.save()
The code takes around 20 minutes to work through the ~3,000 items, which already seems crazy to me, but the main problem is that I end up with duplicates and over 500,000 rows when I should only have about 3,500 for that particular category.
Answer 0 (score: 0)
The problem is here:
for product in article.findAll('img', attrs={'class': 'photo'}):
    pName = product['alt']
    pCat = adrs
    pID = article['data-product-id']
    temp = pd.DataFrame({'proID': [pID], 'Product': [pName], 'Category': [pCat]})  # <-------------- temp DataFrame
    catProd = catProd.append(temp)  # <------------ temp appending into catProd dataframe
    result = result.append(catProd)  # <----------- catProd appending into result DataFrame
Basically, you are appending twice: each temp DataFrame is appended into the catProd DataFrame, and then the whole catProd DataFrame is appended into the result DataFrame on every iteration, so result snowballs with repeated copies of everything collected so far.
There are a couple of ways to fix this. One is to move result = result.append(catProd) outside of that loop, so that catProd is only appended into result once it has been fully filled. The other is to drop catProd altogether and just keep appending straight into result.
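As a side note, DataFrame.append inside a loop is slow anyway, and it has been removed entirely in pandas 2.0, so on a newer pandas the second option is easier to do by collecting plain dicts and building the DataFrame once at the end. This is just a rough sketch of that idea, reusing soup, adrs and pd from the script above (not a drop-in replacement for the full code):

rows = []                                     # created once, before the "for adrs in subcat" loop

# ... inside the loop, after BeautifulSoup has parsed browser.page_source ...
for article in soup.findAll('article', attrs={'class': 'product-list__item hproduct special-order-product'}):
    for product in article.findAll('img', attrs={'class': 'photo'}):
        # one small dict per product instead of one single-row DataFrame per product
        rows.append({'proID': article['data-product-id'],
                     'Product': product['alt'],
                     'Category': adrs})

# ... after the loop over all categories has finished ...
result = pd.DataFrame(rows, columns=['proID', 'Product', 'Category'])

Each product then lands in result exactly once, so the duplicates and the runaway row count go away.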
I also cleaned up a couple of things: the DataFrame index gets reset and is excluded from the Excel write, and I added explicit waits (i.e. waiting for the button to show up) instead of time.sleep, which should speed things up a bit.
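For reference, the explicit-wait pattern on its own looks roughly like the sketch below, using the browser instance from the script. element_to_be_clickable shown here is just another common condition; the full code further down uses visibility_of_element_located instead:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(browser, 10)  # polls about every half second, raises TimeoutException after 10s
# block until the "load more" button is present, visible and enabled, then click it
more_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#MoreProductsButton')))
more_button.click()

The difference from time.sleep(3) is that the wait returns as soon as the button is actually ready, and it fails loudly (instead of silently scraping a half-loaded page) if the button never shows up.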
The full code is below. Don't forget to change for adrs in subcat[0:1] so that it goes through the whole list; I only ran it through the first URL.
One last thing: I figured I'd time it. Going through just the first URL (895 products) and saving the output, it printed Duration: 0 Hours, 02 Minutes, 48 Seconds.
Finally, I had to comment out a few things like the os.chdir lines so I could run it on my machine, so don't forget to un-comment those for your setup.
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support.ui import WebDriverWait
import selenium.webdriver.support.expected_conditions as EC
from bs4 import BeautifulSoup
import os, sys
import time
from urllib.parse import urljoin
import pandas as pd
import re
import numpy as np
import datetime
# base set up
start_time = datetime.datetime.now()
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
#os.chdir("C:/Users/user/desktop/scripts/python")
#cwd = os.getcwd()
#main_dir = os.path.abspath(os.path.join(cwd, os.pardir))
#print('Main Directory:', main_dir)
chromedriver = ("C:/chromedriver_win32/chromedriver.exe")
os.environ["webdriver.chrome.driver"] = chromedriver
# browser = webdriver.Chrome(options=options, executable_path=chromedriver)
mainurl = "https://www.bunnings.com.au/our-range"
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
page = requests.get(mainurl, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
# script start
subcat = []
for item in soup.findAll('ul', attrs={'class': 'chalkboard-menu'}):
    links = item.find_all('a')
    for link in links:
        subcat.append(urljoin(mainurl, link.get("href")))
subcat
result = pd.DataFrame()
for adrs in subcat:
    # headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
    # page = requests.get(adrs, headers=headers)
    # soup = BeautifulSoup(page.content, 'html.parser')
    # pagelink = adrs
    # adrs="https://www.bunnings.com.au/our-range/storage-cleaning/cleaning/brushware-mops/indoor-brooms"
    catProd = pd.DataFrame()
    url = adrs
    browser = webdriver.Chrome(options=options, executable_path=chromedriver)
    browser.get(url)
    lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
    match = False
    while (match == False):
        lastCount = lenOfPage
        #time.sleep(3)
        lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
        if lastCount == lenOfPage:
            match = True
    reached = False
    while (reached == False):
        try:
            wait = WebDriverWait(browser, 10)
            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#MoreProductsButton")))
            browser.find_element_by_css_selector('#MoreProductsButton').click()
            lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
            match = True
            while (match == True):
                lastCount = lenOfPage
                #time.sleep(3)
                lenOfPage = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
                if lastCount == lenOfPage:
                    match = True
                    #time.sleep(3)
                    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.view-more_btn_text")))
                    browser.find_element_by_css_selector('#content-layout_inside-anchor > div.search-result__content > div > div > section > div:nth-child(4) > div > div:nth-child(2) > div > button > div.view-more_btn_text').click()
        except:
            reached = True
    # grab the items
    page = browser.page_source
    soup = BeautifulSoup(page, 'html.parser')
    browser.close()
    for article in soup.findAll('article', attrs={'class': 'product-list__item hproduct special-order-product'}):
        for product in article.findAll('img', attrs={'class': 'photo'}):
            pName = product['alt']
            pCat = adrs
            pID = article['data-product-id']
            temp = pd.DataFrame({'proID': [pID], 'Product': [pName], 'Category': [pCat]})
            #catProd=catProd.append(temp)
            result = result.append(temp)
    #time.sleep(3)
result.head()
result = result.reset_index(drop=True)
#writes to CSV
writer = pd.ExcelWriter('C:/test123123.xlsx')
result.to_excel(writer,'Sheet1', index=False)
writer.save()
finish_time = datetime.datetime.now()
duration = finish_time - start_time
dur_list = str(duration).split(':')
hour = dur_list[0]
minutes = dur_list[1]
seconds = dur_list[2].split('.')[0]
print ('Duration: %s Hours, %s Minutes, %s Seconds' %(hour, minutes, seconds))