How to add to an href in Python using Beautiful Soup

Date: 2019-10-09 15:39:19

Tags: python selenium web-scraping beautifulsoup

I have a script that scrapes a website and tests all of the links it finds. My problem is that the script fails when it encounters a link that starts with a double slash (for example //us.cnn.com).

This is the code where my script fails:

elif "//" in link.get('href'):
    link = "http:" + str(link)
    print("tested link is: " + link)
    driver = webdriver.Chrome(
        '/home/ironmantis7x/PycharmProjects/WebScrapper/chromedriver')
    #driver.get(link)
    #driver.get(str(link))
    driver.get(link.get('href'))

What I want to do is this: when I encounter a link that starts with "//" (a double slash), I just want to send the link with "http:" prepended so that Selenium opens the complete link.

How do I do this correctly?

Here is the full script, in case it is needed for reference.

from bs4 import BeautifulSoup
from selenium import webdriver
import requests
import sys
import time
from datetime import date
from datetime import datetime
import datetime

# chrome browser control options
options = webdriver.ChromeOptions()
options.add_argument('headless')
# options.add_argument('--ignore-certificate-errors')
# options.add_argument("--test-type")
options.binary_location = "/usr/bin/google-chrome"  # <--- needed actual path to chrome browser
# hard set path to chromedriver in project
# driver = webdriver.Chrome('/home/ironmantis7x/Documents/BSSLLC/projects/PycharmProjects/WebScrapper/chromedriver')

# system time for time/date stamping
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

# fetching url to test
url = raw_input("Enter a website to extract the URL's from: ")

r = requests.get("http://" + url)

data = r.text

# soup = BeautifulSoup(data)
soup = BeautifulSoup(data, 'html.parser')

validURL = 0
validChildURL = 0
invalidURL = 0

for link in soup.find_all('a'):
    if "http" in link.get('href'):
        driver = webdriver.Chrome(
            '/home/ironmantis7x/PycharmProjects/WebScrapper/chromedriver')
        driver.get(link.get('href'))
        print(driver.title)
        with open('valid_link_2.txt', 'a') as f:
            print >> f, 'link:', link.get('href')
            print(link.get('href'))
            driver.get(link.get('href'))
            driver.quit()
            validURL = validURL + 1
    elif "https" in link.get('href'):
        driver = webdriver.Chrome(
            '/home/ironmantis7x/PycharmProjects/WebScrapper/chromedriver')
        driver.get(link.get('href'))
        print(driver.title)
        with open('valid_link_2.txt', 'a') as f:
            print >> f, 'link:', link.get('href')
            print(link.get('href'))
            driver.get(link.get('href'))
            driver.quit()
            validURL = validURL + 1
    elif "//" in link.get('href'):
        link = "http:" + str(link)
        print("tested link is: " + link)
        driver = webdriver.Chrome(
            '/home/ironmantis7x/PycharmProjects/WebScrapper/chromedriver')
        driver.get(link.get('href'))
        print(driver.title)
        with open('valid_link.txt', 'a') as f:
            print >> f, 'link:', link.get('href')
            print(link.get('href'))
            driver.get(link.get('href'))
            driver.quit()
            validURL = validURL + 1

    else:
        print(link.get('href') + " is an invalid link")
        with open('invalid_link.txt', 'a') as f:
            print >> f, link.get('href')
            print(link.get('href'))
            driver = webdriver.Chrome('/home/ironmantis7x/PycharmProjects/WebScrapper/chromedriver',
                                      chrome_options=options)
            driver.quit()
            invalidURL = invalidURL + 1

1 Answer:

Answer 0 (score: 1):

You can check all of the links with requests alone, without using Selenium.
To parse and rebuild the URLs I used requests.utils.urlparse and requests.utils.urlunparse.
To check whether a URL is valid, I check whether the page has a title.
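As a quick illustration of that parse-and-reassemble step, a protocol-relative link like the question's //us.cnn.com comes back from urlparse with an empty scheme but a full netloc, so only the scheme needs to be borrowed from the base page (a minimal sketch, using the same requests.utils helpers as the script below):

import requests

# "//us.cnn.com" parses with an empty scheme and netloc "us.cnn.com"
parsed = requests.utils.urlparse("//us.cnn.com")
print(parsed.scheme, parsed.netloc)  # -> '' us.cnn.com

# rebuild the URL with the scheme taken from the base page
print(requests.utils.urlunparse(("http", parsed.netloc, parsed.path, "", "", "")))
# -> http://us.cnn.com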

import requests
from bs4 import BeautifulSoup

valid_urls = []
invalid_urls = []

response = requests.get("http://cnn.com/us")
print(f"base url: %s", response.url)

# scheme, netloc, path, params, query, fragment
parsed_base_url = requests.utils.urlparse(response.url)
base_scheme = parsed_base_url.scheme
base_netloc = parsed_base_url.netloc

page = BeautifulSoup(response.text, 'html5lib')
urls = [a.get("href") for a in page.select("a[href]")]
for url in urls:
    # scheme, netloc, path, params, query, fragment
    parsed_url = requests.utils.urlparse(url)
    scheme = base_scheme if parsed_url.scheme == '' else parsed_url.scheme
    netloc = base_netloc if parsed_url.netloc == '' else parsed_url.netloc

    new_url_components = (scheme, netloc, parsed_url.path, '', '', '')
    new_url = requests.utils.urlunparse(new_url_components)
    title = None
    try:
        response = requests.get(new_url)
        title = BeautifulSoup(response.text, 'html5lib').select_one("title")
    except Exception:
        # the request failed or the page could not be fetched; leave title unset
        pass

    if title:
        print(f"VALID: {title.text.strip()}", new_url)
        valid_urls.append(new_url)
    else:
        print(f"INVALID: {new_url}")
        invalid_urls.append(new_url)

print(f"Valid links count: {len(valid_urls)}")
print(f"Invalid links count: {len(invalid_urls)}")