I'm trying to scrape data from https://www.seethroughny.net/payrolls/110681345, but the table is difficult to work with.
I've tried a number of things.
import pandas as pd
import ssl
import csv

ssl._create_default_https_context = ssl._create_unverified_context

# read_html returns a list of DataFrames, so the first match has to be
# indexed out before saving
calls_df = pd.read_html("https://www.seethroughny.net/payrolls/110681345", header=0)
print(calls_df)
calls_df[0].to_csv("calls.csv", index=False)
I want to parse it into a csv file so I can index-match it against another dataset.
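For the index-match step itself, pandas' DataFrame.merge covers the usual spreadsheet INDEX/MATCH use case. A minimal sketch, assuming a hypothetical other_data.csv and a shared 'Name' key column (both placeholders, not from the question):

import pandas as pd

calls = pd.read_csv("calls.csv")
other = pd.read_csv("other_data.csv")  # hypothetical second dataset

# a left join keeps every scraped row and pulls in the matching columns,
# the pandas analogue of a spreadsheet INDEX/MATCH or VLOOKUP
merged = calls.merge(other, on="Name", how="left")
merged.to_csv("matched.csv", index=False)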
Answer 0 (score: 2)
There is a JSON response that contains the HTML. With the all-results loop version, requests seem to get blocked at random points toward the end.
Single-page version; you can change the current_page value to the appropriate page number.
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs

url = 'https://www.seethroughny.net/tools/required/reports/payroll?action=get'
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.seethroughny.net/payrolls/110681'
}
data = {
    'PayYear[]': '2018',
    'BranchName[]': 'Villages',
    'SortBy': 'YTDPay DESC',
    'current_page': '0',
    'result_id': '110687408',
    'url': '/tools/required/reports/payroll?action=get',
    'nav_request': '0'
}

r = requests.post(url, headers=headers, data=data).json()
soup = bs(r['html'], 'lxml')

results = []
# keep only the odd rows (the visible data rows) and drop each row's
# first cell, which is the expander control rather than data
for item in soup.select('tr:nth-child(odd)'):
    row = [subItem.text for subItem in item.select('td')][1:]
    results.append(row)

df = pd.DataFrame(results)
df.to_csv(r'C:\Users\User\Desktop\Data.csv', sep=',', encoding='utf-8-sig', index=False)
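To fetch a different page with the single-page snippet, only current_page needs to change (the index is zero-based, matching the '0' in the payload above), e.g.:

data['current_page'] = '3'  # zero-based page index, i.e. the fourth page
r = requests.post(url, headers=headers, data=data).json()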
Version for all pages (requests can still fail to return JSON at various points in the loop, even with a delay between them). @Sim's suggestion of swapping out the User-Agent seems to have improved things.
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
import time
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import random

ua = [
    'Mozilla/5.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
]

url = 'https://www.seethroughny.net/tools/required/reports/payroll?action=get'
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.seethroughny.net/payrolls/110681'
}
data = {
    'PayYear[]': '2018',
    'BranchName[]': 'Villages',
    'SortBy': 'YTDPay DESC',
    'current_page': '0',
    'result_id': '110687408',
    'url': '/tools/required/reports/payroll?action=get',
    'nav_request': '0'
}

results = []
i = 0
base_result_id = int(data['result_id'])

with requests.Session() as s:
    # retry transient server errors with a short backoff; mount on https,
    # which is the scheme the endpoint actually uses
    retries = Retry(total=5,
                    backoff_factor=0.1,
                    status_forcelist=[500, 502, 503, 504])
    s.mount('https://', HTTPAdapter(max_retries=retries))
    while len(results) < 1000:  # total
        data['current_page'] = i
        data['result_id'] = str(base_result_id + i)
        try:
            r = s.post(url, headers=headers, data=data).json()
        except Exception as e:
            # non-JSON response: wait, rotate the User-Agent, retry this page
            print(e)
            time.sleep(2)
            headers['User-Agent'] = random.choice(ua)
            continue
        soup = bs(r['html'], 'lxml')
        for item in soup.select('tr:nth-child(odd)'):
            row = [subItem.text for subItem in item.select('td')][1:]
            results.append(row)
        i += 1
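The loop above accumulates rows against a hard-coded count and never writes them out. As an alternative, a sketch that runs inside the same requests.Session() block (reusing url, headers, and data from above) and stops once a page comes back with no data rows; that an out-of-range page returns an empty fragment is an assumption I have not verified against this endpoint:

    page = 0
    results = []
    while True:
        data['current_page'] = page
        r = s.post(url, headers=headers, data=data).json()
        rows = bs(r['html'], 'lxml').select('tr:nth-child(odd)')
        if not rows:  # assumed: a page past the end yields no rows
            break
        for row in rows:
            results.append([td.text for td in row.select('td')][1:])
        page += 1
        time.sleep(1)  # be polite between pages

pd.DataFrame(results).to_csv('Data.csv', encoding='utf-8-sig', index=False)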
@Sim's version:
import requests
import pandas as pd
import time
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

url = 'https://www.seethroughny.net/tools/required/reports/payroll?action=get'
headers = {
    'User-Agent': 'Mozilla/5.0',
    'Referer': 'https://www.seethroughny.net/payrolls/110681'
}
data = {
    'PayYear[]': '2018',
    'BranchName[]': 'Villages',
    'SortBy': 'YTDPay DESC',
    'current_page': '0',
    'result_id': '110687408',
    'url': '/tools/required/reports/payroll?action=get',
    'nav_request': '0'
}

results = []
i = 0

def get_content(i):
    while len(results) < 15908:  # expected total number of records
        print(len(results))
        data['current_page'] = i
        headers['User-Agent'] = ua.random  # fresh User-Agent on every request
        try:
            r = requests.post(url, headers=headers, data=data).json()
        except Exception:
            # non-JSON response: wait and retry the same page
            time.sleep(1)
            continue
        soup = BeautifulSoup(r['html'], 'lxml')
        for item in soup.select('tr:nth-child(odd)'):
            row = [subItem.text for subItem in item.select('td')][1:]
            results.append(row)
        i += 1

if __name__ == '__main__':
    ua = UserAgent()
    get_content(i)
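One caveat with fake_useragent: UserAgent() fetches its browser data over the network the first time it runs and can raise if that lookup fails (the exact exception type varies by version). A minimal guard that falls back to a small static pool (pool contents are placeholders):

import random
from fake_useragent import UserAgent

try:
    ua = UserAgent()
    next_ua = lambda: ua.random
except Exception:
    # lookup failed (offline, upstream change): fall back to a static pool
    pool = ['Mozilla/5.0',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36']
    next_ua = lambda: random.choice(pool)

headers['User-Agent'] = next_ua()  # drop-in replacement for ua.random in the loop above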