I am trying to get the links of the videos that appear in the search results for a particular query on YouTube. I am using the BeautifulSoup and requests Python libraries, and this is what I have done:
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
base="https://www.youtube.com/results?search_query="
query="mickey+mouse"
r = requests.get(base+query)
page=r.text
soup=bs(page,'html.parser')
vids = soup.findAll('a',attrs={'class':'yt-uix-tile-link'})
videolist=[]
for v in vids:
    tmp = 'https://www.youtube.com' + v['href']
    videolist.append(tmp)
pd.DataFrame(videolist).to_excel(<PATH>, header=False, index=False)
This looks up the search results and saves the links of the first 20 videos (the ones shown on the page) to an Excel file. However, I would like to get 400 or 500 links related to the same query. How can I do that? I know how to get all the links from a particular channel, but how do I get them for a particular search query?
Answer 0 (score: 1)
Someone has already built almost exactly this, except it exports to CSV rather than Excel, in an answer on a different SE site.
Unfortunately, I'm not allowed to close this as a duplicate of an answer from a different SE site, so here it is:
#!/usr/bin/env python3
# http://docs.python-requests.org/en/latest/user/quickstart/
# http://www.crummy.com/software/BeautifulSoup/bs4/doc/
import re
import time

import requests
from bs4 import BeautifulSoup

# scrapes the title
def getTitle():
    d = soup.find_all("h1", "branded-page-header-title")
    for i in d:
        name = i.text.strip().replace('\n', ' ').replace(',', '')
        f.write(name + ',')
        print('\t\t%s' % name)

# scrapes the subscriber and view count
def getStats():
    b = soup.find_all("li", "about-stat ")  # trailing space is required.
    for i in b:
        value = i.b.text.strip().replace(',', '')
        name = i.b.next_sibling.strip().replace(',', '')
        f.write(value + ',')
        print('\t\t%s = %s' % (name, value))

# scrapes the description
def getDescription():
    c = soup.find_all("div", "about-description")
    for i in c:
        description = i.text.strip().replace('\n', ' ').replace(',', '')
        f.write(description + ',')
        #print('\t\t%s' % description)

# scrapes all the external links
def getLinks():
    a = soup.find_all("a", "about-channel-link ")  # trailing space is required.
    for i in a:
        url = i.get('href')
        f.write(url + ',')
        print('\t\t%s' % url)

# scrapes the related channels
def getRelated():
    s = soup.find_all("h3", "yt-lockup-title")
    for i in s:
        t = i.find_all(href=re.compile("user"))
        for i in t:
            url = 'https://www.youtube.com' + i.get('href')
            rCSV.write(url + '\n')
            print('\t\t%s,%s' % (i.text, url))

f = open("youtube-scrape-data.csv", "w+")
rCSV = open("related-channels.csv", "w+")
visited = []  # channel URLs already scraped, to avoid repeat requests
base = "https://www.youtube.com/results?search_query="
q = ['search+query+here']
page = "&page="
count = 1
pagesToScrape = 20

for query in q:
    while count <= pagesToScrape:
        scrapeURL = base + str(query) + page + str(count)
        print('Scraping %s\n' % scrapeURL)
        r = requests.get(scrapeURL)
        soup = BeautifulSoup(r.text, 'html.parser')
        users = soup.find_all("div", "yt-lockup-byline")
        for each in users:
            a = each.find_all(href=re.compile("user"))
            for i in a:
                url = 'https://www.youtube.com' + i.get('href') + '/about'
                if url in visited:
                    print('\t%s has already been scraped\n\n' % url)
                else:
                    r = requests.get(url)
                    soup = BeautifulSoup(r.text, 'html.parser')
                    f.write(url + ',')
                    print('\t%s' % url)
                    getTitle()
                    getStats()
                    getDescription()
                    getLinks()
                    getRelated()
                    f.write('\n')
                    print('\n')
                    visited.append(url)
                    time.sleep(3)
        count += 1
        time.sleep(3)
        print('\n')
    count = 1
    print('\n')

f.close()
rCSV.close()
Source: https://codereview.stackexchange.com/questions/92001/youtube-search-result-scraper
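To tie this back to the original question (collecting 400 or 500 video links rather than channel data), the same &page= pagination can be combined with the link extraction from the question. Below is a minimal sketch under that assumption. Be aware that the yt-uix-tile-link anchors and the page parameter belong to YouTube's old server-rendered results pages; the current site builds results with JavaScript, so on today's YouTube this may return nothing, and the YouTube Data API (search.list) is the dependable route. The output path videolist.xlsx is a placeholder.

import time

import pandas as pd
import requests
from bs4 import BeautifulSoup

base = "https://www.youtube.com/results?search_query="
query = "mickey+mouse"
target = 400        # how many links to collect
max_pages = 30      # safety cap on the number of requests
videolist = []
seen = set()        # skip duplicates across pages
page = 1

while len(videolist) < target and page <= max_pages:
    r = requests.get(base + query + "&page=" + str(page))
    soup = BeautifulSoup(r.text, "html.parser")
    anchors = soup.find_all("a", class_="yt-uix-tile-link")
    if not anchors:  # no more results, or the old page layout is gone
        break
    for a in anchors:
        url = "https://www.youtube.com" + a["href"]
        if url not in seen:
            seen.add(url)
            videolist.append(url)
    page += 1
    time.sleep(3)    # be polite between requests

pd.DataFrame(videolist).to_excel("videolist.xlsx", header=False, index=False)  # placeholder path

The deduplication matters here because YouTube's paginated results could repeat videos across pages; without the seen set the same link would land in the spreadsheet more than once.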