Reputation: 807
I was reading *Programming Collective Intelligence*, the chapter on Search Engines, where I came across the following piece of code. Upon implementing it, I got an error. Please help.
import urllib2
from BeautifulSoup import *
from urlparse import urljoin
class crawler:
def __init__(self,dbname):
pass
def __del__(self):
pass
def dbcommit(self):
pass
def getentryid(self,table,field,value,createnew=True):
return None
def addtoindex(self,url,soup):
print 'Indexing %s' % url
def gettextonly(self,soup):
return None
def seperatewords(self,text):
return None
def isindexed(self,url):
return False
def addlinkref(self,urlFrom,urlTo,linkText):
pass
def crawl(self,pages,depth=2):
for i in range(depth):
newpages=set()
for page in pages:
try:
c=urllib2.urlopen(page)
except:
print 'Could not open %s'%page
continue
soup=BeautifulSoup(c.read())
self.addtoindex(page,soup)
links=soup('a')
for link in links:
if('href' in dict(link.attrs)):
url=urljoin(page,link['href'])
if url.find("'")!=-1: continue
url=url.split('#')[0]
if url[0:4]=='http' and not self.isindexed(url):
newpages.add(url)
linkText=self.gettextonly(link)
self.addlinkref(page,url,linkTest)
self.dbcommit()
pages=newpages
def createindextables(self):
pass
I got the following error:
>>cwlr.crawl(pagelist)
Indexing http://en.wikipedia.org/wiki/Artificial_neural_network
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-50-97778b0c0db8> in <module>()
----> 1 cwlr.crawl(pagelist)
C:\Users\Blue\Anaconda\searchengine.py in crawl(self, pages, depth)
47 url=urljoin(page,link['href'])
48 if url.find("'")!=-1: continue
---> 49 url=url.split('#')[0]
50 if url[0:4]=='http' and not self.isindexed(url):
51 newpages.add(url)
NameError: global name 'linkTest' is not defined
Upvotes: 1
Views: 62
Reputation: 500317
NameError: global name 'linkTest' is not defined

You've misspelt `linkText` as `linkTest`:
linkText=self.gettextonly(link)
↑
self.addlinkref(page,url,linkTest)
↑
Upvotes: 1