craftApprentice

Reputation: 2777

How to fix this Python TypeError: iteration over non-sequence?

When I run the entire code below, this line:

for f in features:

from this function (where getfeatures returns a dictionary):

def train(self,item,cat):
    features=self.getfeatures(item)
    # Increment the count for every feature with this category
    for f in features:
      self.incf(f,cat)
    # Increment the count for this category
    self.incc(cat)
    self.con.commit()

produces this error:

TypeError: iteration over non-sequence              

I tried replacing this line: for f in features: with this: for f in features.keys(): but it didn't work ("AttributeError: classifier instance has no attribute 'keys'"). When I try this:

print getfeatures('Nobody owns the water.')

It gives me what I expected:

{'water': 1, 'the': 1, 'nobody': 1, 'owns': 1}

How can I fix this error and iterate properly over the features dictionary?

This code is from the (excellent) book "Programming Collective Intelligence". I just copied it from here (I also bought the book) and cut part of the code (the fisherclassifier, since I'm only using the naivebayes classifier). I find it hard to believe this error has gone unnoticed, so I'm probably doing something wrong.

Here is the entire code:

import sqlite3

#from pysqlite2 import dbapi2 as sqlite

import re
import math

def getfeatures(doc):
  splitter=re.compile('\\W*')
  # Split the words by non-alpha characters
  words=[s.lower() for s in splitter.split(doc)
          if len(s)>2 and len(s)<20]
  # Return the unique set of words only
#  return dict([(w,1) for w in words]).iteritems()
  return dict([(w,1) for w in words])

class classifier:
  def __init__(self,getfeatures,filename=None):
    # Counts of feature/category combinations
    self.fc={}
    # Counts of documents in each category
    self.cc={}
    self.getfeatures=getfeatures

  def setdb(self,dbfile):
    self.con=sqlite.connect(dbfile)
    self.con.execute('create table if not exists fc(feature,category,count)')
    self.con.execute('create table if not exists cc(category,count)')


  def incf(self,f,cat):
    count=self.fcount(f,cat)
    if count==0:
      self.con.execute("insert into fc values ('%s','%s',1)"
                       % (f,cat))
    else:
      self.con.execute(
        "update fc set count=%d where feature='%s' and category='%s'"
        % (count+1,f,cat))

  def fcount(self,f,cat):
    res=self.con.execute(
      'select count from fc where feature="%s" and category="%s"'
      %(f,cat)).fetchone()
    if res==None: return 0
    else: return float(res[0])

  def incc(self,cat):
    count=self.catcount(cat)
    if count==0:
      self.con.execute("insert into cc values ('%s',1)" % (cat))
    else:
      self.con.execute("update cc set count=%d where category='%s'"
                       % (count+1,cat))

  def catcount(self,cat):
    res=self.con.execute('select count from cc where category="%s"'
                         %(cat)).fetchone()
    if res==None: return 0
    else: return float(res[0])

  def categories(self):
    cur=self.con.execute('select category from cc');
    return [d[0] for d in cur]

  def totalcount(self):
    res=self.con.execute('select sum(count) from cc').fetchone();
    if res==None: return 0
    return res[0]


  def train(self,item,cat):
    features=self.getfeatures(item)
    # Increment the count for every feature with this category
    for f in features.keys():
##    for f in features:
      self.incf(f,cat)
    # Increment the count for this category
    self.incc(cat)
    self.con.commit()

  def fprob(self,f,cat):
    if self.catcount(cat)==0: return 0

    # The total number of times this feature appeared in this
    # category divided by the total number of items in this category
    return self.fcount(f,cat)/self.catcount(cat)

  def weightedprob(self,f,cat,prf,weight=1.0,ap=0.5):
    # Calculate current probability
    basicprob=prf(f,cat)

    # Count the number of times this feature has appeared in
    # all categories
    totals=sum([self.fcount(f,c) for c in self.categories()])

    # Calculate the weighted average
    bp=((weight*ap)+(totals*basicprob))/(weight+totals)
    return bp




class naivebayes(classifier):

  def __init__(self,getfeatures):
    classifier.__init__(self,getfeatures)
    self.thresholds={}

  def docprob(self,item,cat):
    features=self.getfeatures(item)

    # Multiply the probabilities of all the features together
    p=1
    for f in features: p*=self.weightedprob(f,cat,self.fprob)
    return p

  def prob(self,item,cat):
    catprob=self.catcount(cat)/self.totalcount()
    docprob=self.docprob(item,cat)
    return docprob*catprob

  def setthreshold(self,cat,t):
    self.thresholds[cat]=t

  def getthreshold(self,cat):
    if cat not in self.thresholds: return 1.0
    return self.thresholds[cat]

  def classify(self,item,default=None):
    probs={}
    # Find the category with the highest probability
    max=0.0
    for cat in self.categories():
      probs[cat]=self.prob(item,cat)
      if probs[cat]>max:
        max=probs[cat]
        best=cat

    # Make sure the probability exceeds threshold*next best
    for cat in probs:
      if cat==best: continue
      if probs[cat]*self.getthreshold(best)>probs[best]: return default
    return best


def sampletrain(cl):
  cl.train('Nobody owns the water.','good')
  cl.train('the quick rabbit jumps fences','good')
  cl.train('buy pharmaceuticals now','bad')
  cl.train('make quick money at the online casino','bad')
  cl.train('the quick brown fox jumps','good')


nb = naivebayes(classifier)

sampletrain(nb)

#print ('\nbuy is classified as %s'%nb.classify('buy'))
#print ('\nquick is classified as %s'%nb.classify('quick'))

##print getfeatures('Nobody owns the water.')

Upvotes: 1

Views: 9709

Answers (2)

ernie

Reputation: 6356

Your initialization never actually passes in the getfeatures function as you expect.

The giveaway is this:

tried replacing this line: for f in features: with this: for f in features.keys(): but it didn't work ("AttributeError: classifier instance has no attribute 'keys'").

Note that it's saying that features is a classifier instance, not a dictionary.

So, looking at your code, you create:

nb = naivebayes(classifier)

The init for naivebayes is:

def __init__(self, getfeatures):
  classifier.__init__(self,getfeatures)
  self.thresholds={}

So, in this case, you're passing in classifier, which gets bound to the getfeatures parameter in the init for classifier. When train later calls self.getfeatures(item), it builds a brand-new classifier instance instead of returning a feature dictionary, and that instance can't be iterated.
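Here is a minimal, self-contained sketch of what goes wrong (assuming Python 2, where iterating an old-style instance with no __iter__ produces exactly this message):

class classifier:
  def __init__(self,getfeatures,filename=None):
    self.getfeatures=getfeatures

# Simulate nb = naivebayes(classifier): the class itself is stored as getfeatures,
# so "calling getfeatures" just builds another classifier instance.
getfeatures=classifier
features=getfeatures('Nobody owns the water.')  # a classifier instance, not a dict

for f in features:  # the instance defines no __iter__, so Python 2 raises:
  pass              # TypeError: iteration over non-sequence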

Upvotes: 2

Michael0x2a

Reputation: 64208

It looks like you're initializing an instance of naivebayes using classifier:

nb = naivebayes(classifier) 

You probably meant to do this instead:

nb = naivebayes(getfeatures)

Inside the for loop in the train method, instead of getting a dict from getfeatures, you were repeatedly instantiating a new instance of classifier.
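For completeness, here is a rough sketch of how the corrected setup could look. It assumes you also hook up the database (the posted code imports sqlite3 but setdb calls sqlite.connect, so aliasing the import is one way to bridge that), and 'test.db' is just an example filename:

import sqlite3 as sqlite  # lets the existing sqlite.connect call in setdb resolve

nb=naivebayes(getfeatures)  # pass the feature-extraction function, not the class
nb.setdb('test.db')         # train() and classify() need self.con to exist
sampletrain(nb)
print ('quick rabbit is classified as %s'%nb.classify('quick rabbit',default='unknown'))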

Upvotes: 3
