import scrapy
from scrapy.crawler import CrawlerProcess
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.proxy import Proxy, ProxyType
class FooSpider(scrapy.Spider):
name = 'foo'
start_urls = ["https://www.whatismybrowser.com/"]
index=1
def __init__(self, *args, **kwargs):
super(FooSpider, self).__init__(*args, **kwargs)
self.download_delay = 0.25
chrome_options = Options() # Initializing Chrome
#chrome_options.add_argument("--headless")
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--ignore-ssl-errors')
IP = '176.31.69.183' # random free proxy from net
PORT = 8080
prox = Proxy()
prox.proxy_type = ProxyType.MANUAL
prox.http_proxy = f'{IP}:{PORT}'
prox.socks_proxy = f'{IP}:{PORT}'
prox.ssl_proxy = f'{IP}:{PORT}'
capabilities = webdriver.DesiredCapabilities.CHROME
prox.add_to_capabilities(capabilities)
self.browser = webdriver.Chrome(executable_path="/home/timmy/Downloads/chromedriver",options=chrome_options, desired_capabilities=capabilities)
#self.browser.implicitly_wait(60) #
def parse(self,response):
self.browser.get(response.url)
data= self.random_data()
print(data)
process = CrawlerProcess({'LOG_LEVEL':'INFO',})
process.crawl(FooSpider)
spider = next(iter(process.crawlers)).spider
process.start()
这是我得到的错误
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.InvalidArgumentException: Message: invalid argument: cannot parse capability: proxy
from invalid argument: Specifying 'socksProxy' requires an integer for 'socksVersion'
我从SSlproxies获得了代理,并且正在尝试使用它,我正在使用此问题running-selenium-webdriver-with-a-proxy-in-python的答案,但是上面却出现了错误
我该如何解决?
答案 0 :(得分:0)
您正在使用旧的Chrome代理语法。这是设置没有身份验证的代理服务器的方法。
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--proxy-server=http://ipaddress:port')
driver = webdriver.Chrome(chrome_options=chrome_options)```