我是python的新手并使用Python 3.6.2并且我试图使用特定关键字从前2页抓取数据。到目前为止,我能够将数据导入Python IDLE窗口,但我在将数据导出为CSV时遇到了困难。我尝试过使用BeautifulSoup 4和pandas但是无法导出。这是迄今为止我所做的。任何帮助将非常感激。
import csv
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Amazon search-results URL. The original code broke this string literal
# across three physical source lines, which is a SyntaxError in Python;
# adjacent string literals inside parentheses concatenate it safely.
url = ("http://www.amazon.in/s/ref=nb_sb_noss?url=search-"
       "alias%3Dautomotive&field-"
       "keywords=helmets+for+men&rh=n%3A4772060031%2Ck%3Ahelmets+for+men&ajr=0")

request = requests.get(url)
soup = BeautifulSoup(request.content, "lxml")

# Write one CSV row per <li> element: its data-asin attribute and its id.
# Tag.get() returns None when the attribute is absent, so rows are still
# emitted for non-result <li> elements.
# newline="" is required so the csv module controls line endings itself
# (otherwise blank rows appear on Windows).
with open("auto.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["Nand", "Result"])
    for item in soup.find_all("li"):
        nand = item.get("data-asin")   # product ASIN, if present
        result = item.get("id")        # element id, if present
        writer.writerow([nand, result])
答案 0 :(得分:0)
问题:帮我将变量“Nand”和“Result”的数据导出到csv文件
# Export the data-asin / id attributes of every <li> element in `soup`
# (parsed elsewhere) to auto.csv, one row per element.
# newline="" stops the csv module from producing blank lines on Windows.
with open("auto.csv", "w", newline="") as fh:
    writer = csv.DictWriter(fh, fieldnames=["Nand", "Result"])
    writer.writeheader()
    data = {}
    for url in soup.find_all("li"):
        # Tag.get() yields None for <li> elements lacking the attribute.
        data["Nand"] = url.get("data-asin")
        data["Result"] = url.get("id")
        writer.writerow(data)
使用Python测试:3.4.2
答案 1 :(得分:0)
我在网站请求中添加了 User-Agent 请求头,以避免被自动反爬虫机制拦截。你得到很多 None,是因为 find_all('li') 没有限定你真正想要的 <li> 标签(搜索结果项);我也把这一点加入了下面的代码中。
import requests
from bs4 import BeautifulSoup
import pandas as pd

url = "http://www.amazon.in/s/ref=nb_sb_noss?url=search-alias%3Dautomotive&field-keywords=helmets+for+men&rh=n%3A4772060031%2Ck%3Ahelmets+for+men&ajr=0"

# A desktop browser User-Agent header is sent to avoid Amazon's automated
# bot blocking on plain requests.get() calls.
request = requests.get(url, headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'})
soup = BeautifulSoup(request.content, "lxml")

# Only <li class="s-result-item"> elements are actual search results;
# an unfiltered find_all('li') also matches navigation items, which is
# why the asker saw many None values.
res = []
for url in soup.find_all('li', class_ = 's-result-item'):
    res.append([url.get('data-asin'), url.get('id')])

df = pd.DataFrame(data=res, columns=['Nand', 'Result'])
df.to_csv('path/where/you/want/to/store/file.csv')  # replace with a real output path
编辑:要处理所有需要的网页,你需要构建一个循环来生成各页的网址,然后把它们传给你已有的主处理代码块。可以参考这个页面的网址格式:http://www.amazon.in/s/ref=sr_pg_2?rh=n%3A4772060031%2Ck%3Ahelmets+for+men&page=2&keywords=helmets+for+men&ie=UTF8&qid=1501133688&spIA=B01N0MAT2E,B01MY1ZZDS,B01N0RMJ1H
EDIT_2 :让我们循环page
参数。您可以手动将page
添加到传递给requests.get()
的网址。
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Base search URL; the page number is appended inside the loop below.
base_url = "http://www.amazon.in/s/ref=sr_pg_2?rh=n%3A4772060031%2Ck%3Ahelmets+for+men&keywords=helmets+for+men&ie=UTF8"
#excluding page from base_url for further adding

res = []
for page in range(1,72): # such range is because last page for needed category is 71
    # A desktop User-Agent avoids Amazon's automated bot blocking.
    request = requests.get(base_url + '&page=' + str(page), headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}) # here adding page
    if request.status_code == 404: #added just in case of error
        break
    soup = BeautifulSoup(request.content, "lxml")
    # Only <li class="s-result-item"> elements are real search results.
    for url in soup.find_all('li', class_ = 's-result-item'):
        res.append([url.get('data-asin'), url.get('id')])

df = pd.DataFrame(data=res, columns=['Nand', 'Result'])
df.to_csv('path/where/you/want/to/store/file.csv')  # replace with a real output path