我尝试使用以下代码对网站进行网页报废。
import http.client
from bs4 import BeautifulSoup
import urllib.request
from lxml.html import fromstring
from http.client import HTTPConnection #as _HTTPConnection, HTTPException
base_url = "https://apct.gov.in/apportal/Search/ViewAPVATDealers.aspx"
page = urllib.request.urlopen(base_url)
soup = BeautifulSoup(page, "html.parser")
path = fromstring(soup.decode('utf-8'))
header = {
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding":"gzip, deflate, br",
"Accept-Language":"en-US,en;q=0.9",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
}
url = "https://apct.gov.in/apportal/Search/ViewAPVATDealers.aspx"
form_data={}
form_data["__EVENTTARGET"] = ""
form_data["__EVENTARGUMENT"] = ""
form_data["__LASTFOCUS"] = ""
form_data["__VIEWSTATE"] = path.xpath('//*[@id="__VIEWSTATE"]/@value')
form_data["__EVENTVALIDATION"] = path.xpath('//*[@id="__EVENTVALIDATION"]/@value')
form_data["ctl00$ContentPlaceHolder1$dropact"] = "LT"
form_data["ctl00$ContentPlaceHolder1$Ddl_Divisions"] = "GUNTUR"
form_data["ctl00$ContentPlaceHolder1$Ddl_Circles"] = "All Circles"
form_data["ctl00$ContentPlaceHolder1$ddlbusines"] = "Agent"
conn = http.client.HTTPConnection('apct.gov.in')
url_params = urllib.parse.urlencode(header)
# 1 #
# conn.request("POST", url, url_params, header)
# response = conn.getresponse()
# print(response.status, response.reason)
# data = response.read()
# print(data)
# conn.close()
# 2 #
# r = requests.post(url,form_data,url_params)
# #import pdb; pdb.set_trace()
# print(r.status_code, r.reason)
当我运行第一个评论部分以检索响应时,它显示403 forbidden
,当我运行第二个评论部分时,它显示internal server error
。
任何人都可以在任何行中发现任何错误,因为我收到此错误。我知道有这么有限的信息很难识别错误,但这是我的最后选择。
提前谢谢。
答案 0 :(得分:1)
我必须知道我的代码中没有错误,相反,如果填充了这些特定值,网站会显示一些错误。