import requests
from bs4 import BeautifulSoup as bs

# Browser-like User-Agent: the site may reject requests that have none.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36"}
url = "https://www.proxyscan.io/"

# Timeout so a stalled connection cannot hang the script forever;
# raise_for_status() fails loudly instead of parsing an HTML error page.
r = requests.get(url, headers=headers, timeout=10)
r.raise_for_status()
soup = bs(r.content, "html.parser")

# Each proxy IP sits in a <th scope="row"> table cell. Read the cell text
# with get_text() instead of stringifying the tag list and chaining
# .replace() calls, which breaks as soon as the markup changes.
for cell in soup.find_all(scope="row"):
    print(cell.get_text(strip=True))
Answer 0 (score: 1)
import requests
from bs4 import BeautifulSoup as bs

# Identify as a regular browser so the request is not filtered out.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36"}
url = "https://www.proxyscan.io/"

# Bounded timeout prevents an indefinite hang; check the HTTP status
# before parsing so errors surface immediately.
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
soup = bs(response.content, "html.parser")

# The proxy IPs live in <th scope="row"> cells. get_text() extracts the
# cell contents directly, replacing the fragile str()/.replace() chain
# that was used to strip tags by hand.
for row_header in soup.find_all(scope="row"):
    print(row_header.get_text(strip=True))
Answer 1 (score: 0)
You can use the `find_next_siblings()` function to get the next available tag. By inspecting the parsed HTML closely, you can see that the port is the tag immediately following the proxy IP. So you can iterate over the variable `a`, find the adjacent tags for each element, and take the first element of the list returned by `find_next_siblings()`. That element looks like `<td>4145</td>`; strip the HTML tags from it (or extract the string from the `td`) and you get the port number.
# For each IP cell in `a`, the port is in the <td> immediately after it.
# find_next_sibling("td") targets that cell directly (and returns None
# instead of raising IndexError when a row has no sibling), and
# get_text() extracts the port without hand-stripping "<td>"/"</td>".
for ip_cell in a:
    port_cell = ip_cell.find_next_sibling("td")
    if port_cell is not None:
        print(port_cell.get_text(strip=True))