My goal is to scrape a set of profile links and then scrape specific data from each of those profiles.
Here is my code for getting the profile links (it should work correctly):
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import re
session = HTMLSession()
r = session.get('https://www.khanacademy.org/computing/computer-science/algorithms/intro-to-algorithms/v/what-are-algorithms')
r.html.render(sleep=5)
soup=BeautifulSoup(r.html.html,'html.parser')
profiles = soup.find_all(href=re.compile("/profile/kaid"))
for links in profiles:
    links_no_list = links.extract()
    text_link = links_no_list['href']
    # strip the trailing "discussion" segment from the href
    text_link_nodiscussion = text_link[:-10]
    final_profile_link ='https://www.khanacademy.org'+text_link_nodiscussion
    print(final_profile_link)
Now here is my code for getting the specific data from a single profile (it also works correctly):
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import re
session = HTMLSession()
r = session.get('https://www.khanacademy.org/profile/Kkasparas/')
r.html.render(sleep=5)
soup=BeautifulSoup(r.html.html,'html.parser')
user_info_table=soup.find('table', class_='user-statistics-table')
if user_info_table is not None:
    # each of the three rows stores its value in the second <td>
    dates,points,videos=[tr.find_all('td')[1].text for tr in user_info_table.find_all('tr')]
else:
    dates=points=videos='NA'
user_socio_table=soup.find_all('div', class_='discussion-stat')
data = {}
for gettext in user_socio_table:
    category = gettext.find('span')
    category_text = category.text.strip()
    # the count is the text node immediately before the label <span>
    number = category.previousSibling.strip()
    data[category_text] = number
full_data_keys=['questions','votes','answers','flags raised','project help requests','project help replies','comments','tips and thanks']
for header_value in full_data_keys:
    if header_value not in data:
        data[header_value]='NA'
user_calendar = soup.find('div',class_='streak-calendar-scroll-container')
if user_calendar is not None:
    # the 'title' attribute of a filled streak cell holds the activity date
    last_activity = user_calendar.find('span',class_='streak-cell filled')
    last_activity_date = last_activity['title']
else:
    last_activity_date='NA'
filename = "khanscraptry1.csv"
f = open(filename, "w")
headers = "date_joined, points, videos, questions, votes, answers, flags, project_request, project_replies, comments, tips_thx, last_date\n"
f.write(headers)
f.write(dates + "," + points.replace("," , "") + "," + videos + "," + data['questions'] + "," + data['votes'] + "," + data['answers'] + "," + data['flags raised'] + "," + data['project help requests'] + "," + data['project help replies'] + "," + data['comments'] + "," + data['tips and thanks'] + "," + last_activity_date + "\n")
f.close()
My question is: how do I automate the script? In other words, how do I merge these two scripts?
The goal is to have a variable that holds a different profile link on each iteration,
then scrape the specific data for each profile link and write it to the csv file (one new row per profile).
Answer (score: 1)
This is pretty straightforward to do. Instead of printing the profile links, I store them in a list variable, then loop through that list to scrape each link and write to the csv file. Some pages don't have all the details, so you have to handle those exceptions as well; in the code below I mark them as 'NA', following the convention used in your code. One more note for the future: consider using Python's built-in csv module for reading and writing csv files (see the sketch after the merged script).
Merged script:
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import re
session = HTMLSession()
r = session.get('https://www.khanacademy.org/computing/computer-science/algorithms/intro-to-algorithms/v/what-are-algorithms')
r.html.render(sleep=5)
soup=BeautifulSoup(r.html.html,'html.parser')
profiles = soup.find_all(href=re.compile("/profile/kaid"))
profile_list=[]
for links in profiles:
    links_no_list = links.extract()
    text_link = links_no_list['href']
    text_link_nodiscussion = text_link[:-10]
    final_profile_link ='https://www.khanacademy.org'+text_link_nodiscussion
    profile_list.append(final_profile_link)
filename = "khanscraptry1.csv"
f = open(filename, "w")
headers = "date_joined, points, videos, questions, votes, answers, flags, project_request, project_replies, comments, tips_thx, last_date\n"
f.write(headers)
for link in profile_list:
    print("Scraping ", link)
    session = HTMLSession()
    r = session.get(link)
    r.html.render(sleep=5)
    soup=BeautifulSoup(r.html.html,'html.parser')
    user_info_table=soup.find('table', class_='user-statistics-table')
    if user_info_table is not None:
        dates,points,videos=[tr.find_all('td')[1].text for tr in user_info_table.find_all('tr')]
    else:
        dates=points=videos='NA'
    user_socio_table=soup.find_all('div', class_='discussion-stat')
    data = {}
    for gettext in user_socio_table:
        category = gettext.find('span')
        category_text = category.text.strip()
        number = category.previousSibling.strip()
        data[category_text] = number
    full_data_keys=['questions','votes','answers','flags raised','project help requests','project help replies','comments','tips and thanks']
    for header_value in full_data_keys:
        if header_value not in data:
            data[header_value]='NA'
    user_calendar = soup.find('div',class_='streak-calendar-scroll-container')
    if user_calendar is not None:
        last_activity = user_calendar.find('span',class_='streak-cell filled')
        try:
            last_activity_date = last_activity['title']
        except TypeError:
            # last_activity is None when the calendar has no filled cell
            last_activity_date='NA'
    else:
        last_activity_date='NA'
    f.write(dates + "," + points.replace("," , "") + "," + videos + "," + data['questions'] + "," + data['votes'] + "," + data['answers'] + "," + data['flags raised'] + "," + data['project help requests'] + "," + data['project help replies'] + "," + data['comments'] + "," + data['tips and thanks'] + "," + last_activity_date + "\n")
f.close()
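As suggested above, the built-in csv module is the safer way to write the rows, since it handles separators and quoting for you. Below is a minimal sketch of just the write step; the write_rows helper and its field names are my own illustration, not part of the original script:

import csv

FIELDNAMES = ['date_joined', 'points', 'videos', 'questions', 'votes',
              'answers', 'flags', 'project_request', 'project_replies',
              'comments', 'tips_thx', 'last_date']

def write_rows(rows, filename='khanscraptry1.csv'):
    # rows: one dict per scraped profile, keyed by FIELDNAMES
    with open(filename, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=FIELDNAMES)
        writer.writeheader()
        writer.writerows(rows)

# example usage with a single hand-built row
write_rows([{'date_joined': '6 years ago', 'points': '1527829', 'videos': '1123',
             'questions': '25', 'votes': '100', 'answers': '2', 'flags': '0',
             'project_request': 'NA', 'project_replies': 'NA', 'comments': '0',
             'tips_thx': '0', 'last_date': 'Saturday Jun 4 2016'}])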
Sample output from khanscraptry1.csv:
date_joined, points, videos, questions, votes, answers, flags, project_request, project_replies, comments, tips_thx, last_date
6 years ago,1527829,1123,25,100,2,0,NA,NA,0,0,Saturday Jun 4 2016
6 years ago,1527829,1123,25,100,2,0,NA,NA,0,0,Saturday Jun 4 2016
6 years ago,3164708,1276,164,2793,348,67,16,3,5663,885,Wednesday Oct 31 2018
6 years ago,3164708,1276,164,2793,348,67,16,3,5663,885,Wednesday Oct 31 2018
NA,NA,NA,18,NA,0,0,NA,NA,0,NA,Monday Dec 24 2018
NA,NA,NA,18,NA,0,0,NA,NA,0,NA,Monday Dec 24 2018
5 years ago,240334,56,7,42,6,0,2,NA,12,2,Tuesday Nov 20 2018
5 years ago,240334,56,7,42,6,0,2,NA,12,2,Tuesday Nov 20 2018
...
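Note that every profile appears twice in the sample output, presumably because the video page links to each profile twice (for example once from the avatar and once from the username). If you want one row per profile, you could deduplicate profile_list before the loop; a one-line sketch (my addition, not in the original answer):

profile_list = list(dict.fromkeys(profile_list))  # drop duplicates, keep order (Python 3.7+)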