在python中第一次抓取后如何移动到第二页

时间:2019-02-10 15:19:09

标签: python selenium beautifulsoup

我正在尝试从速卖通抓取产品详细信息。我有两个问题。首先,如何抓取产品类别并将其保存在csv文件中每个产品的前面;其次,如何翻到第二页及后续页面,直到没有更多可用页面或到达第10页为止。

这是我为查找下一页而编写的代码

from bs4 import BeautifulSoup
import requests as r

# Fetch the AliExpress "jackets" category listing page.
page = r.get('https://www.aliexpress.com/category/200000664/jackets.html?spm=2114.11010108.102.4.650c649b8lfPOb')
soup = BeautifulSoup(page.content, 'html.parser')

# The bottom pagination bar contains the anchors pointing at the other pages.
content = soup.find(id="pagination-bottom")

# Bug fix: soup.find returns None when the element is absent (the page is
# JS-rendered or the request was blocked), and the original code then crashed
# with AttributeError on content.findAll. Guard before iterating.
if content is not None:
    for anchor in content.find_all('a'):  # find_all is the modern bs4 name
        href = anchor.get('href')
        if href:  # skip anchors that carry no href attribute
            print('https:' + href)
else:
    print('pagination bar not found - page may be JS-rendered or blocked')

from selenium import webdriver 
from selenium.webdriver.support.ui import WebDriverWait 
from selenium.webdriver.support import expected_conditions as EC 
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException

from bs4 import BeautifulSoup
import urllib.request

# Bug fix: the original line fused two import statements into one
# ("from bs4 import BeautifulSoup import urllib.request"), which is a
# SyntaxError; they must be separate statements.

# CSV output file for the scraped product rows.
# NOTE(review): opened at module level and never closed; the class methods
# below write to it, so it must stay open for the lifetime of the script.
filename = "alibaba.csv"
f = open(filename, "w")

# Column header row of the CSV file.
headers = "product_name, price, Rating \n"
f.write(headers)


class alibabascrape(object):
    """Scrape AliExpress search results for *keyword* using Selenium.

    Each result tile is split into name / price / rating and appended as a
    row to the module-level CSV handle ``f``.
    """

    def __init__(self, keyword):
        self.keyword = keyword

        # Search-results URL for the given keyword.
        self.url = f"https://www.aliexpress.com/wholesale?catId=0&initiative_id=&SearchText={keyword}"

        # Bug fix: the original 'c:\geckodriver.exe' only worked because
        # '\g' happens not to be a recognised escape sequence (and emits a
        # DeprecationWarning on modern Pythons). Use a raw string.
        self.driver = webdriver.Firefox(executable_path=r'c:\geckodriver.exe')
        # Maximum number of seconds to wait for the page to become ready.
        self.delay = 3

    def load_alibabalist_url(self):
        """Open the search URL and wait until the search bar is present."""
        self.driver.get(self.url)
        try:
            wait = WebDriverWait(self.driver, self.delay)
            wait.until(EC.presence_of_all_elements_located((By.ID, "form-searchbar")))
            print("page is ready")
        except TimeoutException:
            print("Too much Time")

    def extract_post_information(self):
        """Write name/price/rating of every visible result tile to the CSV.

        Returns:
            list[str]: the raw text of each result tile.
        """
        all_posts = self.driver.find_elements_by_class_name("list-item")
        post_title_list = []
        for post in all_posts:
            title = post.text.split("\n")
            # Robustness fix: some tiles do not carry all seven text lines;
            # the original code crashed with IndexError on title[6].
            if len(title) < 7:
                continue
            name = title[0]
            print(name)
            price = title[2]
            print(price)
            rating = title[6]
            print(rating)
            f.write(name + "," + price + "," + rating + "\n")
            post_title_list.append(post.text)

        return post_title_list

    def extract_category(self):
        """Print the category sidebar elements of the results page."""
        category = self.driver.find_elements_by_class_name("col-sub")
        print(category)

    def extract_post_urls(self):
        """Collect product-detail URLs from the search page via urllib + bs4.

        Returns:
            list[str]: the href of every matching product anchor.
        """
        url_list = []
        html_page = urllib.request.urlopen(self.url)
        soup = BeautifulSoup(html_page, "lxml")
        for link in soup.findAll("a", {"class": "history-item product"}):
            print(link["href"])
            url_list.append(link["href"])
        return url_list


# Idiom fix: guard the script entry point so importing this module does not
# immediately launch a browser and start scraping.
if __name__ == "__main__":
    # Run a full scrape for one example keyword.
    keyword = "iphone"
    scrapper = alibabascrape(keyword)
    scrapper.load_alibabalist_url()
    scrapper.extract_post_information()
    scrapper.extract_category()
    scrapper.extract_post_urls()

1 个答案:

答案 0 :(得分:0)

我可以帮助您进行分页:

  1. 如果已获得所有分页链接,则只需使用for循环迭代所有链接即可。
  2. 如果您只有上一页或下一页的链接,则使用while循环检查下一页链接是否存在;若存在则单击它,直到没有更多页面为止。