这段脚本会请求一个URL,并将返回的结果以XML格式打印出来。
import requests
from bs4 import BeautifulSoup
# USPS address-verification request: the XML query is embedded directly in the URL.
url = r'''https://secure.shippingapis.com/ShippingAPI.dll?API=Verify&XML=
<AddressValidateRequest USERID="564WILLC0589"><Address><Address1>
2451 Avalon Ct</Address1><Address2></Address2><City>Aurora</City>
<State>IL</State><Zip5></Zip5><Zip4></Zip4></Address></AddressValidateRequest>'''

# Fetch the endpoint.
response = requests.get(url)

# Keep the raw body so we can confirm the URL was encoded correctly.
raw_body = response.text

# Build a BeautifulSoup tree from the XML reply and pretty-print it.
pretty_xml = BeautifulSoup(raw_body, features="xml").prettify()
print(pretty_xml)
# prints
>>>
<?xml version="1.0" encoding="utf-8"?>
<AddressValidateResponse>
<Address>
<Address2>
2001 GARDNER CIR W
</Address2>
<City>
AURORA
</City>
<State>
IL
</State>
<Zip5>
60503
</Zip5>
<Zip4>
6213
</Zip4>
</Address>
</AddressValidateResponse>
>>>
但是,我有一个需要以XML格式打印的URL列表(保存在一个文本文件中)。如何从这个列表中每次取出一项传递给requests.get()?文本文件的读取代码如下。
# Text file listing every URL to process.
txtfile = r'C:\Users\jpilbeam\USPSAPIWCHDUpdateAll.txt'

# Read it into a list, one URL per entry, with surrounding whitespace removed.
with open(txtfile) as f:
    x = [line.strip() for line in f]
答案 0(得分:1):
import requests
from bs4 import BeautifulSoup
# convert text file into a list
def file_to_list(file_name):
    """Read *file_name* and return its lines as a list, whitespace-stripped."""
    with open(file_name) as handle:
        return [line.strip() for line in handle]
def scrape(url):
    """Fetch *url* and print the response body as pretty-printed XML.

    Fixes over the original:
    - parse with ``features="xml"`` (as the question's working code did)
      instead of ``"html.parser"``, which lowercases tag names and can
      mangle an XML document;
    - pass a timeout so a stalled USPS endpoint cannot hang the script;
    - raise on HTTP error status instead of silently parsing an error page.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # surface 4xx/5xx instead of printing garbage
    new_xml = BeautifulSoup(response.text, features="xml").prettify()
    print(new_xml)
# File that lists all the URLs to process.
txtfile = r'C:\Users\jpilbeam\USPSAPIWCHDUpdateAll.txt'

# Scrape every URL from the file, one at a time.
for link in file_to_list(txtfile):
    scrape(link)