I am trying to extract article URLs from every page of the website. Only the URLs on the first page get scraped, over and over, and stored in a csv file. The information from those links is then scraped in the same way and stored in a text file.
I need some help with this problem.
import requests
from bs4 import BeautifulSoup
import csv
import lxml
import urllib2

base_url = 'https://www.marketingweek.com/?s=big+data'
response = requests.get(base_url)
soup = BeautifulSoup(response.content, "lxml")

res = []

while 1:
    search_results = soup.find('div', class_='archive-constraint')  #localizing search window with article links
    article_link_tags = search_results.findAll('a')  #ordinary scheme goes further
    res.append([url['href'] for url in article_link_tags])

    #Automatically clicks next button to load other articles
    next_button = soup.find('a', text='>>')
    #Searches for articles till Next button is not found
    if not next_button:
        break
    res.append([url['href'] for url in article_link_tags])
    soup = BeautifulSoup(response.text, "lxml")

for i in res:
    for j in i:
        print(j)

####Storing scraped links in csv file###
with open('StoreUrl1.csv', 'w+') as f:
    f.seek(0)
    for i in res:
        for j in i:
            f.write('\n'.join(i))

#######Extracting info from URLs########
with open('StoreUrl1.csv', 'rb') as f1:
    f1.seek(0)
    reader = csv.reader(f1)
    for line in reader:
        url = line[0]
        soup = BeautifulSoup(urllib2.urlopen(url), "lxml")
        with open('InfoOutput1.txt', 'a+') as f2:
            for tag in soup.find_all('p'):
                f2.write(tag.text.encode('utf-8') + '\n')
Answer 0 (score: 1)
A solution using lxml's html parser.
There are 361 pages of results, and each page has 12 links. We can iterate over every page and extract the links with XPath.
XPath makes it easy to pull out the value of a specific attribute of a tag (here: the 'href' attribute value of the 'a' tags).
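As a minimal, self-contained sketch of that XPath pattern (run here against an inline HTML string rather than the live site), the trailing /@href returns the attribute values directly:

from lxml import html

#Tiny stand-in for one search-results page (illustrative markup only)
snippet = html.fromstring(
    '<div class="archive-constraint">'
    '<h2 class="hentry-title entry-title"><a href="/article-1">First</a></h2>'
    '<h2 class="hentry-title entry-title"><a href="/article-2">Second</a></h2>'
    '</div>')

#Ending the expression with /@href yields the attribute values, not the elements
links = snippet.xpath('//h2[@class = "hentry-title entry-title"]/a/@href')
print(links)  # ['/article-1', '/article-2']

The full script, iterating over all 361 pages: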
import csv
from lxml import html
from time import sleep
import requests
from random import randint

outputFile = open("All_links.csv", 'wb')
fileWriter = csv.writer(outputFile)

fileWriter.writerow(["Sl. No.", "Page Number", "Link"])

url1 = 'https://www.marketingweek.com/page/'
url2 = '/?s=big+data'

sl_no = 1

#iterating from the 1st page through the 361st page
for i in xrange(1, 362):
    #generating the final url to be scraped from the page number
    url = url1 + str(i) + url2

    #Fetching page
    response = requests.get(url)
    sleep(randint(10, 20))
    #using html parser
    htmlContent = html.fromstring(response.content)

    #Capturing all 'a' tags under h2 tags with class 'hentry-title entry-title'
    page_links = htmlContent.xpath('//div[@class = "archive-constraint"]//h2[@class = "hentry-title entry-title"]/a/@href')
    for page_link in page_links:
        fileWriter.writerow([sl_no, i, page_link])
        sl_no += 1
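One small design note on this script: outputFile is never explicitly closed, so the last buffered rows may be lost if the run is interrupted partway through. A safer variant would open the file in a with block (with open("All_links.csv", "wb") as outputFile:) and keep the whole loop inside it, so the file is always closed and flushed.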
Answer 1 (score: 0)
I tried printing next_button and it is None. If we can go directly to a specific page just by modifying the URL slightly, why do we need the a tag for the next button at all? In your code you assume you will find a link pointing to the next page, but soup.find('a', text='>>') does not return any element with that text. Here is your modified code. Let me know if it works for you.
import requests
from bs4 import BeautifulSoup
import csv
import pandas as pd
import urllib2

df = pd.DataFrame()
base_url = 'https://www.marketingweek.com/?s=big+data'

res = []
page = 1
while page < 362:
    page_url = 'https://www.marketingweek.com/page/' + str(page) + '/?s=big+data'
    response = requests.get(page_url)
    soup = BeautifulSoup(response.content, "html.parser")
    search_results = soup.find('div', class_='columns-flex full-ads')  #localizing search window with article links
    try:
        article_link_tags = search_results.findAll('a')  #ordinary scheme goes further
    except AttributeError:
        #skip pages where the container was not found (otherwise the loop never advances)
        page += 1
        continue
    res.append([url['href'] for url in article_link_tags])
    print('Found {} links on page {}'.format(len(res[-1]), page))
    df = df.append(res[-1])  #append returns a new DataFrame, so the result must be reassigned
    #for i in res:
    #    for j in i:
    #        print(j)
    page += 1
    # break

####Storing scraped links in csv file###
df.to_csv('SampleUrl.csv', index=False)
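If pandas feels heavy for writing a single column of links, the final step could also use the csv module directly. A minimal sketch, assuming res has been filled by the loop above (the 'Link' header row is an assumption, not part of the original code):

import csv

#Flatten the per-page lists in res into one CSV column ('wb' because the rest of this script is Python 2)
with open('SampleUrl.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerow(['Link'])
    for page_links in res:
        for link in page_links:
            writer.writerow([link])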
Answer 2 (score: 0)
I only fixed the URL-collection part:
import requests
from bs4 import BeautifulSoup

next_button = 'https://www.marketingweek.com/page/1/?s=big+data'

res = []
while 1:
    response = requests.get(next_button)
    soup = BeautifulSoup(response.text, "lxml")

    search_results = soup.find('div', class_='archive-constraint')  #localizing search window with article links
    article_link_tags = search_results.findAll('a')  #ordinary scheme goes further
    #added duplication drop from list of urls
    row = [url['href'] for url in article_link_tags]
    row = list(set(row))
    res.append(row)

    #Automatically clicks next button to load other articles
    next_button = soup.find('a', class_='next page-numbers')
    #Searches for articles till Next button is not found
    if not next_button:
        break
    next_button = next_button['href']

for i in res:
    for j in i:
        print(j)
EDIT: the first version of the code ran into the site's anti-scraping bot blocking. The fixes below may help (for a 100% guarantee you would need to understand how the bot blocking on this site actually works). I added a random sleep of 5 to 30 seconds and a random user-agent change (desktop user agents only) on each request to the site to get past the blocking:
import numpy as np
import time
import requests
from bs4 import BeautifulSoup

user_agents = {0: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246',
               1: 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',
               2: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',
               3: 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',
               4: 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'}

next_button = 'https://www.marketingweek.com/page/1/?s=big+data'

res = []
while 1:
    response = requests.get(next_button, headers={'User-Agent': user_agents[np.random.randint(0, 5)]})
    soup = BeautifulSoup(response.text, "lxml")

    search_results = soup.find('div', class_='columns-flex full-ads')  #localizing search window with article links
    article_link_tags = search_results.findAll('a')  #ordinary scheme goes further
    #added duplication drop from list of urls
    row = [url['href'] for url in article_link_tags]
    row = list(set(row))
    res.append(row)

    #Automatically clicks next button to load other articles
    next_button = soup.find('a', class_='next page-numbers')
    #Searches for articles till Next button is not found
    if not next_button:
        break
    next_button = next_button['href']

    time.sleep((30 - 5) * np.random.random() + 5)

for i in res:
    for j in i:
        print(j)
If it still fails after a while, try making the sleep twice as long, or longer.
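A minimal sketch of that "double the sleep" idea as an explicit retry with exponential backoff (the fetch_page helper and the blocked-status check are illustrative assumptions, not something the code above contains):

import time
import requests

def fetch_page(url, headers, max_tries=4, base_delay=15):
    #Retry helper: wait, then double the wait, after every attempt that looks blocked
    delay = base_delay
    for attempt in range(max_tries):
        response = requests.get(url, headers=headers)
        #403/429 are typical "blocked" responses; adjust to whatever the site really returns
        if response.status_code not in (403, 429):
            return response
        time.sleep(delay)
        delay *= 2
    return response

The bare requests.get call in the loop above could then be replaced with fetch_page(next_button, headers={'User-Agent': ...}).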