Hello, I'm a Python beginner, so I apologize for asking such a specific question without knowing what's actually going wrong. I'm trying to scrape news articles from a Korean news site. When I run this code,
import sys
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import quote

target_url_b4_pn = "http://news.donga.com/search?p="
target_url_b4_keyword = '&query='
target_url_rest = "&check_news1&more=1&sorting1&search_date1&v1=&v2=&range=1"

def get_text(URL, output_file):
    source_code_from_URL = urllib.request.urlopen(URL)
    soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding='UTF-8')
    content_of_article = soup.select('div.article')
    for item in content_of_article:
        string_item = str(item.find_all(text=True))
        output_file.write(string_item)

def get_link_from_news_title(page_num, URL, output_file):
    for i in range(page_num):
        current_page_num = 1 + i*15
        position = URL.index('=')
        URL_with_page_num = URL[:position+1] + str(current_page_num) + URL[position+1:]
        source_code_from_URL = urllib.request.urlopen(URL_with_page_num)
        soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding='UTF-8')
        for title in soup.find_all('p', 'tit'):
            title_link = title.select('a')
            article_URL = title_link[0]['href']
            get_text(article_URL, output_file)

def main():
    keyword = "노무현"
    page_num = 1
    output_file_name = "output.txt"
    target_url = target_url_b4_pn + target_url_b4_keyword + quote(keyword) + target_url_rest
    output_file = open(output_file_name, "w", -1, "utf-8")
    get_link_from_news_title(page_num, target_url, output_file)
    output_file.close()

if __name__ == '__main__':
    main()
    print(target_url)
    print(11111)
the code seems to freeze somehow. Could you tell me where it might be going wrong?
Answer 0 (score: 0)
In the first line of your get_text function, urllib.request.urlopen(URL) opens the URL, but just as with an open file, you still have to read it. So add read() after it:

    urllib.request.urlopen(URL).read()

Otherwise BeautifulSoup won't be able to parse it.
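To make the difference concrete, here is a minimal sketch (example.com is just a placeholder URL):

    import urllib.request

    # urlopen() returns a response object, not the page contents
    response = urllib.request.urlopen("http://example.com")
    print(type(response))    # <class 'http.client.HTTPResponse'>

    # read() consumes the body and returns the raw HTML as bytes
    html_bytes = response.read()
    print(type(html_bytes))  # <class 'bytes'>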
Also, your CSS selector soup.select('div.article') matches no element on the page; I think what you want is soup.select('div.article_txt'), which matches the article's paragraphs.
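An easy way to sanity-check a selector is to count how many elements select() returns before looping over the result; the markup below is a made-up stand-in for the real page:

    from bs4 import BeautifulSoup

    # stand-in HTML using the class name suggested above
    html = '<div class="article_txt"><p>article body</p></div>'
    soup = BeautifulSoup(html, 'lxml')

    print(len(soup.select('div.article')))      # 0 -> nothing matches, the loop body never runs
    print(len(soup.select('div.article_txt')))  # 1 -> matches the article text block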
Finally, your print(target_url) should be moved into your main function, since target_url is only defined inside main.
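As a minimal illustration of why the original placement raises an error, a name assigned inside a function is local to that function:

    def main():
        target_url = "http://news.donga.com/search?p="  # local to main()

    main()
    print(target_url)  # NameError: name 'target_url' is not defined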
import sys
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import quote

target_url_b4_pn = "http://news.donga.com/search?p="
target_url_b4_keyword = '&query='
target_url_rest = "&check_news1&more=1&sorting1&search_date1&v1=&v2=&range=1"

def get_text(URL, output_file):
    # read() the response so BeautifulSoup gets the page bytes
    source_code_from_URL = urllib.request.urlopen(URL).read()
    soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding='UTF-8')
    # changed the css selector so it matches elements that exist on the page
    content_of_article = soup.select('div.article_txt')
    for item in content_of_article:
        string_item = item.find_all(text=True)
        # write the joined strings to the file
        output_file.write(" ".join(string_item))

def get_link_from_news_title(page_num, URL, output_file):
    for i in range(page_num):
        current_page_num = 1 + i*15
        position = URL.index('=')
        URL_with_page_num = URL[:position+1] + str(current_page_num) + URL[position+1:]
        source_code_from_URL = urllib.request.urlopen(URL_with_page_num)
        soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding='UTF-8')
        for title in soup.find_all('p', 'tit'):
            title_link = title.select('a')
            article_URL = title_link[0]['href']
            get_text(article_URL, output_file)

def main():
    keyword = "노무현"
    page_num = 1
    output_file_name = "output.txt"
    target_url = target_url_b4_pn + target_url_b4_keyword + quote(keyword) + target_url_rest
    # moved print(target_url) here, where target_url is defined
    print(target_url)
    output_file = open(output_file_name, "w", -1, "utf-8")
    get_link_from_news_title(page_num, target_url, output_file)
    output_file.close()

if __name__ == '__main__':
    main()
    print(11111)