How do I save scraped results from multiple website pages to a CSV file?

Asked: 2020-01-15 11:58:02

Tags: python selenium web-scraping

I am trying to scrape some ASINs (say, 600 of them) from the Amazon website (only the ASINs) using Selenium and BeautifulSoup. My main problem is how to save all of the scraped data to a CSV file. I have tried a few things, but only the last scraped page ever gets saved.

Here is the code:

from time import sleep
import requests
import time
import json
import re
import sys
import numpy as np
from selenium import webdriver
import urllib.request
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup


data_record = []  # accumulate the ASINs scraped from every page
i = 1
while(True):
    try:
        if i == 1:
            url = "https://www.amazon.es/s?k=doll&i=toys&rh=n%3A599385031&dc&page=1"
        else:
            url = "https://www.amazon.es/s?k=doll&i=toys&rh=n%3A599385031&dc&page={}".format(i)
        r = requests.get(url)
        soup = BeautifulSoup(r.content, 'html.parser')

        #print page url
        print(url)

        #rest of the scraping code
        driver = webdriver.Chrome()
        driver.get(url)

        html = driver.page_source
        soup = BeautifulSoup(html, "html.parser")
        # collect the data-asin attribute of every div that carries one
        res1 = [div.attrs["data-asin"] for div in soup.find_all("div") if div.has_attr("data-asin")]
        print(res1)
        data_record.append(res1)
        #driver.close()

        #don't overflow website
        sleep(1)

        #increase page number
        i += 1
        if i == 3:
            print("STOP!!!")
            break
    except:
        break



1 Answer:

Answer 0 (score: 1)

Collect the ASINs from each page in a list, combine them once every page has been scraped, and write the CSV in a single pass at the end:

import csv
import bs4
from selenium import webdriver
from time import sleep


def retrieve_asin_from(base_url, idx):
    url = base_url.format(idx)

    # render the page with Selenium, then parse the HTML with BeautifulSoup
    with webdriver.Chrome() as driver:
        driver.get(url)
        soup = bs4.BeautifulSoup(driver.page_source, "html.parser")
        res1 = [div.attrs["data-asin"]
                for div in soup.find_all("div") if div.has_attr("data-asin")]

    # don't overflow the website
    sleep(1)
    return res1


url = "https://www.amazon.es/s?k=doll&i=toys&rh=n%3A599385031&dc&page={}"
data_record = [retrieve_asin_from(url, i) for i in range(1, 4)]

combined_data_record = combine_records(data_record)  # combine_records is left for you to write; one possible sketch is shown below

with open('asin_data.csv', 'w', newline='') as fd:
    csvfile = csv.writer(fd)
    csvfile.writerows(combined_data_record)
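
A minimal sketch of combine_records, which the answer leaves to the reader. It assumes you want one ASIN per CSV row, with duplicates across pages dropped; that behaviour is an assumption, not part of the original answer:

def combine_records(data_record):
    # data_record is a list of per-page ASIN lists (one list per scraped page).
    # Assumption: write one ASIN per row and skip ASINs already seen on earlier pages.
    seen = set()
    rows = []
    for page_results in data_record:
        for asin in page_results:
            if asin and asin not in seen:   # ignore empty strings and repeats
                seen.add(asin)
                rows.append([asin])         # csv.writerows expects an iterable of rows
    return rows

With this in place, csvfile.writerows(combined_data_record) writes one ASIN per line; the newline='' argument to open keeps the csv module from inserting blank lines between rows on Windows.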