OMGPOP

Reputation: 984

Using Scrapy to crawl non-unicode Asian-language (Chinese, for example) websites: encoding problem

I use Scrapy to crawl content from several Asian websites. Some of them use UTF-8 encoding, but others use different encodings such as 'gb2312'.

I write my own output statements instead of using pipelines and items. Here is the Scrapy code for Yelp restaurant reviews, which works:

from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
#from crawlsite.items import ReviewItem
import re

class ReviewSpider(BaseSpider):
    name = "yelp"
    allowed_domains = ['yelp.com']
    start_urls = ['http://www.yelp.com/biz/providence-los-angeles-2']

    #callback function, invoked when a response is received
    def parse(self, response):
        hxs = HtmlXPathSelector(response)

        stars = hxs.select('//meta[@itemprop="ratingValue"]/@content').extract()
        review = hxs.select('//p[@itemprop="description"]').extract()

        url = response.url
        date = hxs.select('//meta[@itemprop="datePublished"]/@content').extract()
        user = hxs.select('//li[@class="user-name"]/a/text()').extract()

        file = open('crawled.xml', 'a')

        starting = str(url)[55:]

        startingid = 0
        if starting.isdigit():
            startingid = int(starting)

        id = 0
        while id < 40:
            file.write('\n<doc id="%s">\n' % str(id + 1 + startingid))
            #the first stars rating is the overall rating
            file.write('\t<stars>%s</stars>\n' % stars[id+1])
            file.write('\t<url>\n\t%s\n\t</url>\n' % url)
            file.write('\t<date>%s</date>\n' % date[id])

            #user can be unicode as well
            file.write('\t<user>%s</user>\n' % user[id].encode('utf8'))
            #there is no title for yelp reviews
            file.write('\t<title>NULL</title>\n')

            #need to strip the HTML tags from the review
            file.write('\t<review>\n\t')
            review[id] = re.sub('<[^<]+?>', '', review[id])
            file.write(review[id].encode('utf8'))
            file.write('\n\t</review>\n')


            star = stars[id+1]
            polarity = "NULL"
            confidence = "NULL"

            file.write('\t<polarity>%s</polarity>\n' % polarity)
            file.write('\t<confidence>%s</confidence>\n' % confidence)

            file.write('</doc>\n')
            id += 1

        file.close()

Note that some reviews contain French or Spanish words, but they are all UTF-8, so this line works:

file.write(review[id].encode('utf8'))

This is the code to crawl an Asian website with a different encoding:

    allowed_domains = ['duanwenxue.com']
    start_urls = ['http://www.duanwenxue.com/article/113415.html']

    def parse(self, response):
        hxs = HtmlXPathSelector(response)

        content = hxs.select('//div[@class="content-in-similar"]/p/text()').extract()

        file = open('crawled.xml', 'a')

        file.write(str(content).decode('GB2312'))

        file.close()

The output file looks like this:

[u'\u4e00\u5927\u5b66\u751f\u88ab\u654c\u4eba\u6293\u4e86\uff0c\u654c\u4eba\u628a\u4ed6\u7ed1\u5728\u4e86\u7535\u7ebf\u6746\u4e0a\uff0c\u7136\u540e\u95ee\u4ed6\uff1a\u8bf4\uff0c\u4f60\u662f\u54ea\u91cc\u7684\uff1f\u4e0d\u8bf4\u5c31\u7535\u6b7b\u4f60\uff01\u5927\u5b66\u751f\u56de\u4e86\u654c\u4eba\u4e00\u53e5\u8bdd\uff0c\u7ed3\u679c\u

I used the W3C validator to check the encoding, which should be correct. I have tried str(content).decode('GB2312').encode('utf8') and similar combinations; none of them work.

Upvotes: 1

Views: 3196

Answers (1)

Shane Evans

Reputation: 2254

You should use unicode.encode to convert content from a unicode object to a str object using whatever encoding you wish for your output file. Using your example content:

>>> content = [u'\u4e00\u5927\u5b66\u751f\u88ab\u654c\u4eba\u6293\u4e86\uff0c\u654c\u4eba\u628a\u4ed6\u7ed1\u5728\u4e86\u7535\u7ebf\u6746\u4e0a\uff0c\u7136\u540e...']
>>> print content[0]
一大学生被敌人抓了,敌人把他绑在了电线杆上,然后...
>>> content_utf8 = content[0].encode('utf8')
>>> content_utf8[:10]
'\xe4\xb8\x80\xe5\xa4\xa7\xe5\xad\xa6\xe7'
>>> print content_utf8
一大学生被敌人抓了,敌人把他绑在了电线杆上,然后...

Then you can open the file and write the str object (content_utf8 in the code above).
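
Scrapy has already decoded the GB2312 response for you: extract() returns a list of unicode objects, which is why passing the list through str() produces its repr with \uXXXX escapes, and there is nothing left to decode. Applied to your spider, a minimal sketch of the parse method could look like this (assuming you simply want each extracted paragraph written to the file as UTF-8):

def parse(self, response):
    hxs = HtmlXPathSelector(response)

    # extract() returns a list of unicode objects, already decoded by Scrapy
    content = hxs.select('//div[@class="content-in-similar"]/p/text()').extract()

    file = open('crawled.xml', 'a')
    for paragraph in content:
        # encode each unicode string to bytes at the point of writing
        file.write(paragraph.encode('utf8'))
        file.write('\n')
    file.close()

The important part is encoding exactly once, from unicode to bytes, at the point of writing, using whatever output encoding you want.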

Upvotes: 2
