我正在阅读《编程集体智慧》中关于搜索引擎的章节，其中遇到了以下代码；在实现时它报错了，请帮忙看看。
import urllib2
from BeautifulSoup import *
from urlparse import urljoin
class crawler:
def __init__(self,dbname):
pass
def __del__(self):
pass
def dbcommit(self):
pass
def getentryid(self,table,field,value,createnew=True):
return None
def addtoindex(self,url,soup):
print 'Indexing %s' % url
def gettextonly(self,soup):
return None
def seperatewords(self,text):
return None
def isindexed(self,url):
return False
def addlinkref(self,urlFrom,urlTo,linkText):
pass
def crawl(self,pages,depth=2):
for i in range(depth):
newpages=set()
for page in pages:
try:
c=urllib2.urlopen(page)
except:
print 'Could not open %s'%page
continue
soup=BeautifulSoup(c.read())
self.addtoindex(page,soup)
links=soup('a')
for link in links:
if('href' in dict(link.attrs)):
url=urljoin(page,link['href'])
if url.find("'")!=-1: continue
url=url.split('#')[0]
if url[0:4]=='http' and not self.isindexed(url):
newpages.add(url)
linkText=self.gettextonly(link)
self.addlinkref(page,url,linkTest)
self.dbcommit()
pages=newpages
def createindextables(self):
pass
我收到以下错误:
>>cwlr.crawl(pagelist)
Indexing http://en.wikipedia.org/wiki/Artificial_neural_network
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-50-97778b0c0db8> in <module>()
----> 1 cwlr.crawl(pagelist)
C:\Users\Blue\Anaconda\searchengine.py in crawl(self, pages, depth)
47 url=urljoin(page,link['href'])
48 if url.find("'")!=-1: continue
---> 49 url=url.split('#')[0]
50 if url[0:4]=='http' and not self.isindexed(url):
51 newpages.add(url)
NameError: global name 'linkTest' is not defined
答案 0（得分：1）：
NameError: global name 'linkTest' is not defined（未定义全局名称 "linkTest"）

你把 linkText 拼错成了 linkTest：

    linkText=self.gettextonly(link)
        ↑
    self.addlinkref(page,url,linkTest)
                             ↑

把最后一行里的 linkTest 改为 linkText 即可。