我一直在尝试抓取某个新闻网站上的内容,例如新闻描述、标签、评论等。描述和标签都已成功抓取。但是在抓取评论时,BeautifulSoup 按标签查找到对应元素后,里面却没有评论内容,尽管我在浏览器中检查页面时可以看到这些评论。
我只想抓取页面中的所有评论(包括嵌套评论),并把它们拼接成一个字符串保存到 csv 文件中。
import requests
import bs4
from time import sleep
import os

# Article to scrape; the long tail of the URL is the URL-encoded Bengali headline.
url = 'https://www.prothomalo.com/bangladesh/article/1573772/%E0%A6%AC%E0%A6%BE%E0%A6%82%E0%A6%B2%E0%A6%BE%E0%A6%A6%E0%A7%87%E0%A6%B6%E0%A6%BF-%E0%A6%AA%E0%A6%BE%E0%A6%B8%E0%A6%AA%E0%A7%8B%E0%A6%B0%E0%A7%8D%E0%A6%9F%E0%A6%A7%E0%A6%BE%E0%A6%B0%E0%A7%80-%E0%A6%B0%E0%A7%8B%E0%A6%B9%E0%A6%BF%E0%A6%99%E0%A7%8D%E0%A6%97%E0%A6%BE%E0%A6%B0%E0%A6%BE-%E0%A6%B8%E0%A7%8C%E0%A6%A6%E0%A6%BF-%E0%A6%A5%E0%A7%87%E0%A6%95%E0%A7%87-%E0%A6%A2%E0%A6%BE%E0%A6%95%E0%A6%BE%E0%A7%9F'
resource = requests.get(url, timeout=3.0)
soup = bs4.BeautifulSoup(resource.text, 'lxml')

# Tags: join the text of every <a> inside the topic list with a '|' separator.
# A trailing '|' is kept to match the original accumulation behavior.
# NOTE(review): the {'':''} attrs filter is kept verbatim from the original —
# it looks like a no-op/placeholder; confirm it matches the intended anchors.
topic_list = soup.find('div', {'class': 'topic_list'})
tag = ''.join(a.text + '|' for a in topic_list.findAll('a', {'': ''}))

# Article body: concatenate the text of every <p> under the articleBody div.
content_tag = soup.find('div', {'itemprop': 'articleBody'})
content = ''.join(p.text for p in content_tag.findAll('p', {'': ''}))

# Comments are NOT present in the initial HTML served to requests: the site
# loads them later via AJAX, so this find() only returns the empty
# placeholder markup (see the console output below).
comment = soup.find('div', {'class': 'comments_holder'})
print(comment)
控制台:
<div class="comments_holder">
<div class="comments_holder_inner">
<div class="comments_loader"> </div>
<ul class="comments_holder_ul latest">
</ul>
</div>
</div>
答案 0(得分:0)
您在 Firefox 开发者工具中看到的内容,并不是通过 requests 直接获得的。评论是通过 AJAX 单独加载的,并且是 JSON 格式。
import re
import json
import requests
from bs4 import BeautifulSoup

# Article page URL; its numeric id is embedded in the path after 'article/'.
url = 'https://www.prothomalo.com/bangladesh/article/1573772/%E0%A6%AC%E0%A6%BE%E0%A6%82%E0%A6%B2%E0%A6%BE%E0%A6%A6%E0%A7%87%E0%A6%B6%E0%A6%BF-%E0%A6%AA%E0%A6%BE%E0%A6%B8%E0%A6%AA%E0%A7%8B%E0%A6%B0%E0%A7%8D%E0%A6%9F%E0%A6%A7%E0%A6%BE%E0%A6%B0%E0%A7%80-%E0%A6%B0%E0%A7%8B%E0%A6%B9%E0%A6%BF%E0%A6%99%E0%A7%8D%E0%A6%97%E0%A6%BE%E0%A6%B0%E0%A6%BE-%E0%A6%B8%E0%A7%8C%E0%A6%A6%E0%A6%BF-%E0%A6%A5%E0%A7%87%E0%A6%95%E0%A7%87-%E0%A6%A2%E0%A6%BE%E0%A6%95%E0%A6%BE%E0%A7%9F'
# AJAX endpoint the page itself calls to fetch the comment thread as JSON.
comment_url = 'https://www.prothomalo.com/api/comments/get_comments_json/?content_id={}'

# Extract the numeric article id from the URL, then request the comment JSON.
id_match = re.search(r'article/(\d+)', url)
article_id = id_match.group(1)
response = requests.get(comment_url.format(article_id))
comment_data = response.json()

# Pretty-print the full comment payload (nested replies use the 'parent' field).
print(json.dumps(comment_data, indent=4))
打印:
{
"5529951": {
"comment_id": "5529951",
"parent": "0",
"label_depth": "0",
"commenter_name": "MD Asif Iqbal",
"commenter_image": "//profiles.prothomalo.com/profile/999009/picture/",
"comment": "\u098f\u0987 \u09ad\u09be\u09b0 \u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6\u0995\u09c7 \u09b8\u09be\u09b0\u09be\u099c\u09c0\u09ac\u09a8 \u09ac\u09b9\u09a8 \u0995\u09b0\u09a4\u09c7 \u09b9\u09ac\u09c7",
"create_time": "2019-01-08 19:59",
"comment_status": "published",
"like_count": "\u09e6",
"dislike_count": "\u09e6",
"like_me": null,
"dislike_me": null,
"device": "phone",
"content_id": "1573772"
},
"5529952": {
"comment_id": "5529952",
"parent": "0",
... and so on.