我从网站上获取一张包含公司名称和注册号的表格。该表中的问题是,它不包含相关公司的地址。现在我的工作是使用公司名称,我必须从该网站收集地址。我有100个公司名称。无法为每个公司手动执行此操作。所以我找到了一种方法,它可以自动执行。这是我的代码。但这并不能带来我所要求的结果。
from bs4 import BeautifulSoup as BS
import requests
from googlesearch import search
from googleapiclient.discovery import build
import re
# Company names to resolve to postal addresses via Google search below.
# FIX: the pasted source broke the "aktive Stuttgarter" string literal
# across two physical lines (a SyntaxError); the literal is rejoined here.
companylist = ['ARTA Management für das Handwerk GmbH + Co.',
               'aktive Stuttgarter',
               'ABEX Dachdecker Handwerks-GmbH',
               'Academie für Kunst und Handwerk e.V.',
               'AHA Agentur fürs Handwerk GmbH']
# For each company: Google the name, try '<first result>/kontakt', and grep
# the contact page's <p> text for a street address.
for com in companylist:
    # Collect the top search-result URLs for the company name.
    url_list = []
    for url in search(com, tld='de', lang='de', stop=5):
        url_list.append(url)

    # First guess: many German sites expose their address under /kontakt.
    webSite = requests.get(url_list[0] + 'kontakt')
    if webSite.status_code == 200:
        soup = BS(webSite.content, 'html.parser')
        string = ''
        for line in soup.findAll('p'):
            string = string + line.text + ' '
        # BUG FIX: the original pattern r'\s\w+\s\Straße\s...' used '\S',
        # which is the non-whitespace character class — it matched one
        # arbitrary character followed by 'traße'.  Match the literal
        # street keyword instead, and guard against re.search returning
        # None (the original crashed with AttributeError on pages
        # without an address).
        match = re.search(r'\s\w+\sStraße\s\w+\s\w+\s\w+\s', string)
        if match:
            print(match.group())
    else:
        # Fallback: fetch the homepage and follow its 'kontakt' link.
        # BUG FIX: the original referenced 'soup' here before it was ever
        # assigned (NameError on the first failed request) — the homepage
        # must be fetched and parsed first.
        homepage = requests.get(url_list[0])
        soup = BS(homepage.content, 'html.parser')
        for link in soup.find_all('a'):
            href = link.get('href')
            # Guard: <a> tags without an href attribute return None.
            if href and href.endswith('kontakt/'):
                webSite = requests.get(href)
                if webSite.status_code == 200:
                    soup = BS(webSite.content, 'html.parser')
                    string = ''
                    for line in soup.findAll('p'):
                        string = string + line.text + ' '
                    match = re.search(r'\s\w+\sStraße\s\w+\s\w+\s\w+\s', string)
                    if match:
                        print(match.group())
代码的小描述。首先,我把公司名称放在Google搜索上,它带回5个链接。我使用第一个链接,然后转到主页并找到联系人链接。然后,我进入联系页面并获取所有文本,然后从那里尝试使用“ re”查找公司地址。它无法正常工作。请给我一些建议。提前致谢。
答案 0(得分:0)
由于没有找到所有网站的地址,因此您必须计算出一些位。但是我一直在循环。另外,某些请求得到404响应。
我花了一些时间让它重试不同的搜索结果。也许不是您想要的东西,但是也许会给您一些需要的东西。我想至少,您可以找到多个地址,也许只剩下少数几个需要回去手动查找:
from bs4 import BeautifulSoup as BS
import requests
from googlesearch import search
from googleapiclient.discovery import build
import re
# Target companies; each name is used verbatim as a Google search query.
companylist = ['ARTA Management für das Handwerk GmbH + Co.', "aktive Stuttgarter", 'ABEX Dachdecker Handwerks-GmbH',
'Academie für Kunst und Handwerk e.V.', 'AHA Agentur fürs Handwerk GmbH']
# Desktop-browser User-Agent so sites are less likely to reject the request
# (some of the scraped sites returned 404/403 to the default UA).
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'}
# Companies for which no address could be found — left for manual follow-up.
not_found = []
# For each company, walk up to 50 Google results; for each result URL try
# '<url>kontakt' and grep the page's <p> text for a street address.  Stops
# at the first hit; companies with no hit accumulate in not_found.
for com in companylist:
    url_list = []
    for url in search(com, tld='de', lang='de', stop=50):
        url_list.append(url)
    found_contact = False
    for url2 in url_list:
        next_url = False
        # Loop exits once an address is found or this URL is exhausted.
        while found_contact == False and next_url == False:
            try:
                webSite = requests.get(url2 + 'kontakt', headers=headers)
                if webSite.status_code == 200:
                    soup = BS(webSite.content, 'html.parser')
                    string = ''
                    for line in soup.findAll('p'):
                        string = string + line.text + ' '
                    # BUG FIX: r'\Straße' used the '\S' (non-whitespace)
                    # class; match the literal 'Straße' keyword instead.
                    match = re.search(r'\s\w+\sStraße\s\w+\s\w+\s\w+\s', string)
                    try:
                        # match.group() raises AttributeError when match is
                        # None — that is the "not found on this page" path.
                        print('Found: %s\n%s' % (com, match.group().strip()), '\n')
                        found_contact = True
                        next_url = True
                        if com in not_found:
                            # BUG FIX: original did `del not_found[com]`,
                            # which raises TypeError on a list — lists are
                            # indexed by position, not value.
                            not_found.remove(com)
                        continue
                    except AttributeError:
                        print('Attempt: %s of %s: Address not found for: %s' % (url_list.index(url2) + 1, len(url_list), com))
                        next_url = True
                        if com not in not_found:
                            not_found.append(com)
                else:
                    next_url = True
                    print('Attempt: %s of %s: Address not found for: %s' % (url_list.index(url2) + 1, len(url_list), com))
            except Exception:
                # Narrowed from a bare `except:` (which also swallows
                # KeyboardInterrupt/SystemExit); keeps the best-effort
                # "skip this URL on any request failure" behaviour.
                next_url = True
                print('Attempt: %s of %s: Address not found for: %s' % (url_list.index(url2) + 1, len(url_list), com))
Additional:
不是最快的方法,但是您可以使用Selenium自动查找这些公司。
from bs4 import BeautifulSoup as BS
from selenium import webdriver
import requests
# Business-registry portal used to look each company up by name.
url = 'https://www.firmenwissen.de/index.html'
# Desktop User-Agent for the plain requests.get() calls made after Selenium
# has located the company's detail page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'}
# Selenium-driven Chrome; the chromedriver path is machine-specific — adjust.
driver = webdriver.Chrome('C:/chromedriver_win32/chromedriver.exe')
driver.get(url)
# Companies to look up in the portal's search box.
companylist = ['ARTA Management für das Handwerk GmbH + Co.', "aktive Stuttgarter", 'ABEX Dachdecker Handwerks-GmbH',
'Academie für Kunst und Handwerk e.V.', 'AHA Agentur fürs Handwerk GmbH']
# Companies whose info block could not be located — collected for retry.
error = []
for company in companylist:
    # Type the company name into the portal's search box and submit.
    inputElement = driver.find_element_by_id("searchPhrase0")
    inputElement.clear()
    inputElement.send_keys(company)
    inputElement.submit()
    # Parse the result page rendered by the browser.
    soup = BS(driver.page_source, 'html.parser')
    link = soup.find('span', {'class': 'company--name'})
    a_link = link.find('a')['href']
    # Fetch the company's detail page directly (email revealed via query arg).
    response = requests.get('https://www.firmenwissen.de' + a_link + '?showEmail=true', headers=headers)
    alpha_soup = BS(response.text, 'html.parser')
    # .find() returns None when the span is absent, so .text raises
    # AttributeError — fall back to an empty string in that case.
    try:
        phone = alpha_soup.find('span', {'class': 'yp_phoneNumber'}).text.strip()
    except AttributeError:
        phone = ''
    try:
        email = alpha_soup.find('span', {'class': 'yp_email'}).text.strip()
    except AttributeError:
        email = ''
    try:
        website = alpha_soup.find('span', {'class': 'yp_website'}).text.strip()
    except AttributeError:
        # BUG FIX: original assigned the misspelled name 'webiste' here,
        # leaving 'website' unbound and crashing the print below with
        # NameError whenever the span was missing.
        website = ''
    try:
        contact = soup.find('div', {'class': 'company--info'})
        address = contact.find_all('p')[-1].text.strip()
        # BUG FIX: the original printed unconditionally after the except
        # branch, where 'address' was never assigned (NameError, or a stale
        # value from the previous company).  Print only on success.
        print('%s\n%s\n%s\n%s\n' % (address, phone, email, website))
    except AttributeError:
        print('Could not locate %s company info' % (company))
        error.append(company)