Data in the .csv repeats three times. I need to export three different scrapes to one csv file

Time: 2019-03-26 20:08:30

Tags: python beautifulsoup

The csv file is repeating the same information. I need the scraped information from three embedded sources written out to an excel sheet. There is code that follows the href from the base page to each product review page, and when I check the printed output in Spyder (Anaconda Python distribution), everything is parsed correctly. So the problem must be inside the write function. Can anyone help?

I have tried reformatting the code with the a+ and w/w+ open modes, but nothing seems to work.
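
For reference, this is what I mean by the different modes (a minimal sketch; mode_test.csv is just a placeholder filename, and only the mode string changed between attempts):

import csv

# Each of these modes was tried for the open() calls in the script below;
# the duplicated rows appeared regardless of the mode.
with open('mode_test.csv', 'w') as f:    # truncate, then write
    csv.writer(f).writerow(['row'])
with open('mode_test.csv', 'w+') as f:   # truncate, write, readable
    csv.writer(f).writerow(['row'])
with open('mode_test.csv', 'a+') as f:   # append, readable
    csv.writer(f).writerow(['row'])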

import requests as r
from bs4 import BeautifulSoup

#Get URL
main_url = 'http://drd.ba.ttu.edu/isqs6339/imbadproducts/'
response = r.get(main_url)

#Set filepaths
filepath = 'dataout1.csv'
filepath2 = 'dataout2.csv'

#Check for good link and get headers
print(response.status_code)
print (response.headers)

soup = BeautifulSoup(response.text, 'lxml')
print(soup.prettify())

#Find all anchors on the page
search_results = soup.find('div', attrs={'id' : 'searchresults'})
product_results = search_results.find_all('a')

#Define product link, id, title, price, and description for all products
for link in product_results:
    link_url = main_url + link.get('href')
    productId = link.find('span', attrs={'class' : 'productid'}).text
    product_title = link.find('span', attrs={'class' : 'producttitle'}).text
    product_price = link.find('span', attrs={'class' : 'productprice'}).text
    product_description = link.find('span', attrs={'class' : 'productdesc'}).text

    #Get links for each product
    response2 = r.get(link_url)
    soup2 = BeautifulSoup(response2.text, 'lxml')
    #Find each user review for the product on the page
    user_review = soup2.find('div', attrs={'id' : 'userreviews'})
    review_results = user_review.find_all('div')
    #Find author, stars, and review info for each review of the page's product and print results
    for rev in review_results:
        print ('ProductID: ' + productId)
        print ('Product Title: ' + product_title)
        print ('Product Price: ' + product_price)
        print('Product Description: ' + product_description)
        print ('User Review: ' )
        author = rev.find('span', attrs={'class' : 'rauthor'}).text
        print('Author: ' + author)
        stars = rev.find('span', attrs={'class' : 'rstars'}).text
        print('Stars: ' + stars)
        review_of_product = rev.find('span' , attrs={'class' : 'rtext'}).text
        print('Review: ' + review_of_product)
        review_length = len(review_of_product)
        print('Length: ')
        print(review_length)
        print('------------')

#Import CSV
import csv

#Open File 1 in CSV File
with open(filepath, 'w') as dataout:
    datawriter = csv.writer(dataout, delimiter= ',', quotechar= '"', quoting = csv.QUOTE_NONNUMERIC)
    headers = ['ProductId', 'Product Title', 'Product Price', 'Author', 'Stars', 'Length of Review']
    datawriter.writerow(headers)

    for link in product_results:
        productId = link.find('span', attrs={'class' : 'productid'}).text
        product_title = link.find('span', attrs={'class' : 'producttitle'}).text
        product_price = link.find('span', attrs={'class' : 'productprice'}).text
        for rev in review_results:    
            author = rev.find('span', attrs={'class' : 'rauthor'}).text
            stars = rev.find('span', attrs={'class' : 'rstars'}).text
            review_of_product = rev.find('span' , attrs={'class' : 'rtext'}).text
            datawriter.writerow([productId, product_title, product_price, author, stars, len(review_of_product)])

#Open File 2 in CSV File
with open(filepath2, 'w') as dataout2:
    datawriter = csv.writer(dataout2, delimiter= ',', quotechar= '"', quoting = csv.QUOTE_NONNUMERIC)
    headers = ['ProductId', 'Author', 'Stars', 'Review Text']
    datawriter.writerow(headers)

    for link in product_results:
        productId = link.find('span', attrs={'class' : 'productid'}).text
        for rev in review_results:    
            author = rev.find('span', attrs={'class' : 'rauthor'}).text
            stars = rev.find('span', attrs={'class' : 'rstars'}).text
            review_of_product = rev.find('span' , attrs={'class' : 'rtext'}).text
            datawriter.writerow([productId, author, stars, review_of_product])

1 Answer:

Answer 0 (score: 0):

You are looping over the reviews for each link, but the reviews are fetched outside the loop over the links, so by the time you write the files, review_results only holds the reviews from the last iteration, i.e. the last link. You need to fetch and loop over the reviews inside the loop over each link; essentially, you need to make it a nested loop.
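
Schematically, the difference looks like this (a minimal runnable sketch; scrape_reviews and the sample data are stand-ins for the request/parse steps, not functions from your code):

def scrape_reviews(link):
    # placeholder for the requests.get + BeautifulSoup parsing
    return [link + '-review-1', link + '-review-2']

product_results = ['link-a', 'link-b', 'link-c']

# Buggy shape: review_results is overwritten on every pass of the first
# loop, so the writing loop only ever sees the last link's reviews.
for link in product_results:
    review_results = scrape_reviews(link)
for link in product_results:
    for rev in review_results:
        print(link, rev)   # every link paired with link-c's reviews

# Fixed shape: fetch and write each link's reviews in one nested loop.
for link in product_results:
    review_results = scrape_reviews(link)
    for rev in review_results:
        print(link, rev)   # each link paired with its own reviews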

I also fixed the csv skipping lines by adding the parameter newline='' to open(). Without it, the csv module writes an extra \r on platforms that use \r\n line endings, which shows up as a blank row between records.

import requests as r
from bs4 import BeautifulSoup
import csv

#Get URL
main_url = 'http://drd.ba.ttu.edu/isqs6339/imbadproducts/'
response = r.get(main_url)

#Set filepaths
filepath = 'dataout1.csv'
filepath2 = 'dataout2.csv'

#Check for good link and get headers
print(response.status_code)
print (response.headers)

soup = BeautifulSoup(response.text, 'lxml')
print(soup.prettify())

#Find all anchors on the page
search_results = soup.find('div', attrs={'id' : 'searchresults'})
product_results = search_results.find_all('a')


with open(filepath, 'w', newline = '') as dataout:
    datawriter = csv.writer(dataout, delimiter= ',', quotechar= '"', quoting = csv.QUOTE_NONNUMERIC)
    headers = ['ProductId', 'Product Title', 'Product Price', 'Author', 'Stars', 'Length of Review']
    datawriter.writerow(headers)
    #Define product link, id, title, price, and description for all products
    for link in product_results:
        link_url = main_url + link.get('href')
        productId = link.find('span', attrs={'class' : 'productid'}).text
        product_title = link.find('span', attrs={'class' : 'producttitle'}).text
        product_price = link.find('span', attrs={'class' : 'productprice'}).text
        product_description = link.find('span', attrs={'class' : 'productdesc'}).text

        response2 = r.get(link_url)
        soup2 = BeautifulSoup(response2.text, 'lxml')
        #Find each user review for the product on the page
        user_review = soup2.find('div', attrs={'id' : 'userreviews'})
        review_results = user_review.find_all('div')

        for rev in review_results:
            author = rev.find('span', attrs={'class' : 'rauthor'}).text
            stars = rev.find('span', attrs={'class' : 'rstars'}).text
            review_of_product = rev.find('span' , attrs={'class' : 'rtext'}).text
            datawriter.writerow([productId, product_title, product_price, author, stars, len(review_of_product)])

#Open File 2 in CSV File
with open(filepath2, 'w', newline = '') as dataout2:
    datawriter = csv.writer(dataout2, delimiter= ',', quotechar= '"', quoting = csv.QUOTE_NONNUMERIC)
    headers = ['ProductId', 'Author', 'Stars', 'Review Text']
    datawriter.writerow(headers)

    for link in product_results:
        link_url = main_url + link.get('href')
        productId = link.find('span', attrs={'class' : 'productid'}).text

        response2 = r.get(link_url)
        soup2 = BeautifulSoup(response2.text, 'lxml')
        #Find each user review for the product on the page
        user_review = soup2.find('div', attrs={'id' : 'userreviews'})
        review_results = user_review.find_all('div')

        for rev in review_results:    
            author = rev.find('span', attrs={'class' : 'rauthor'}).text
            stars = rev.find('span', attrs={'class' : 'rstars'}).text
            review_of_product = rev.find('span' , attrs={'class' : 'rtext'}).text
            datawriter.writerow([productId, author, stars, review_of_product])
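
As a side note (my own variation, not part of the fix above): both files need the same reviews, so you could also write the two csvs in a single pass and request each product page only once:

import requests as r
from bs4 import BeautifulSoup
import csv

main_url = 'http://drd.ba.ttu.edu/isqs6339/imbadproducts/'
soup = BeautifulSoup(r.get(main_url).text, 'lxml')
product_results = soup.find('div', attrs={'id' : 'searchresults'}).find_all('a')

with open('dataout1.csv', 'w', newline='') as f1, open('dataout2.csv', 'w', newline='') as f2:
    writer1 = csv.writer(f1, quoting=csv.QUOTE_NONNUMERIC)
    writer2 = csv.writer(f2, quoting=csv.QUOTE_NONNUMERIC)
    writer1.writerow(['ProductId', 'Product Title', 'Product Price', 'Author', 'Stars', 'Length of Review'])
    writer2.writerow(['ProductId', 'Author', 'Stars', 'Review Text'])

    for link in product_results:
        productId = link.find('span', attrs={'class' : 'productid'}).text
        product_title = link.find('span', attrs={'class' : 'producttitle'}).text
        product_price = link.find('span', attrs={'class' : 'productprice'}).text

        # one request per product page, shared by both writers
        soup2 = BeautifulSoup(r.get(main_url + link.get('href')).text, 'lxml')
        for rev in soup2.find('div', attrs={'id' : 'userreviews'}).find_all('div'):
            author = rev.find('span', attrs={'class' : 'rauthor'}).text
            stars = rev.find('span', attrs={'class' : 'rstars'}).text
            review_text = rev.find('span', attrs={'class' : 'rtext'}).text
            writer1.writerow([productId, product_title, product_price, author, stars, len(review_text)])
            writer2.writerow([productId, author, stars, review_text])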