我是python和BS4的新手,我想从特定网站上抓取新闻。
我的目标是根据今天的日期获取父URL的新闻,但是,当我尝试这样做时,它返回了一个空白的csv文件。请提供有关如何解决或改进的建议!预先感谢
这是我的代码:
from bs4 import BeautifulSoup
import requests, re, pprint
from datetime import date
import csv

# The site renders article dates like "14 Oct, 2020" (see the accepted
# answer), so format today's date the same way.  The original
# "%B %d, %Y" ("October 14, 2020") never matched, so every article was
# skipped and the CSV came out empty.
today = date.today()
d2 = today.strftime("%d %b, %Y")

result = requests.get('https://www.spglobal.com/marketintelligence/en/news-insights/latest-news-headlines/')
soup = BeautifulSoup(result.content, "lxml")

urls = []
titles = []
contents = []

# Compile the pattern once, outside the loop, instead of per anchor.
market_intelligence_pattern = re.compile(
    r"^/marketintelligence/en/news-insights/latest-news-headlines/.*")

# Collect all links from 'latest news' into a list.
for item in soup.find_all("a"):
    url = item.get("href")
    if not url:
        # Some <a> tags carry no href; re.findall(None) would raise.
        continue
    if not market_intelligence_pattern.match(url):
        continue
    if url == "/marketintelligence/en/news-insights/latest-news-headlines/index":
        continue  # skip the listing page itself
    # href already begins with "/", so join without adding another slash.
    urls.append("https://www.spglobal.com" + url)

# Extract today's articles; CSV row format: date, title, content.
# 'with' guarantees the file is flushed and closed even on error.
with open('output.csv', 'w', newline='') as newfile:
    outputWriter = csv.writer(newfile)
    for each in urls:
        individual = requests.get(each)
        soup2 = BeautifulSoup(individual.content, "lxml")
        # NOTE: do not name this variable 'date' -- that shadowed the
        # imported datetime.date in the original code.
        meta = soup2.find("li", class_="meta-data__date")
        if meta is None:
            continue  # article page without a date tag
        article_date = meta.get_text(strip=True)
        if d2 != article_date:  # today's articles only
            continue
        title = soup2.find("h2", class_="article__title").text.strip()
        titles.append(title)
        precontent = soup2.find("div", class_="wysiwyg-content")
        # Join the paragraphs into one string so the CSV cell holds
        # readable text instead of a Python list repr.
        paragraphs = [p.text for p in precontent.find_all("p")]
        outputWriter.writerow([article_date, title, "\n".join(paragraphs)])
答案 0（得分：0）
也许下面的代码能为您指明正确的方向：
from datetime import date
import requests
from bs4 import BeautifulSoup

result = requests.get('https://www.spglobal.com/marketintelligence/en/news-insights/latest-news-headlines/')
soup = BeautifulSoup(result.content, "lxml").find_all("a")

# Build today's date string once, outside the loop; strftime already
# returns a str, so the original str(...) wrapper was redundant.
today = date.today().strftime("%d %b, %Y")

for item in soup:
    # .get() avoids a KeyError on <a> tags that have no href attribute.
    href = item.get("href", "")
    if href.startswith("/marketintelligence/en/news-insights/latest") and not href.endswith("index"):
        # Keep the "www." host that the listing page itself was fetched
        # from, avoiding a needless redirect per article.
        article_soup = BeautifulSoup(requests.get(f"https://www.spglobal.com{href}").content, "lxml")
        article_date = article_soup.find("li", {"class": "meta-data__date"})
        # Guard against article pages missing the date element.
        if article_date is not None and article_date.getText(strip=True) == today:
            print(article_soup.find("h2", {"class": "article__title"}).getText(strip=True))
如果日期与今天的日期匹配,则会打印文章标题。
输出:
Houston, America's fossil fuel capital, braces for the energy transition
Blackstone to sell BioMed for $14.6B; Simon JV deal talks for J.C. Penney stall
Next mega-turbine is coming but 'the sky has a limit,' says MHI Vestas CEO