flyer

Reputation: 9806

Scrapy: how to test the delay between requests

I set a delay between requests and wanted to see the effect, but it seemed nothing happened. I set:

DOWNLOAD_DELAY=5

CONCURRENT_REQUESTS=1

CONCURRENT_REQUESTS_PER_IP=1

RANDOM_DOWNLOAD_DELAY=False

I thought that if it worked, I would see a delay of 5 seconds between requests, but it didn't happen.
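For reference, one way to actually observe the spacing is to timestamp every outgoing request in a downloader middleware. This is only a minimal sketch; the module path and class name are illustrative, not part of the original project:

import time

class TimingMiddleware(object):
    """Log the gap between consecutive outgoing requests."""
    last_request_time = None

    def process_request(self, request, spider):
        now = time.time()
        if TimingMiddleware.last_request_time is not None:
            spider.log("%.2f s since previous request" %
                       (now - TimingMiddleware.last_request_time))
        TimingMiddleware.last_request_time = now

It would be enabled with something like DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.TimingMiddleware': 543} in settings.py.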

The following code snippet is the spider:

import os
import linecache
from random import randint

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.conf import settings

class Useragent(BaseSpider):

    name = 'useragent'

    settings.overrides['DOWNLOAD_DELAY'] = 5
    settings.overrides['CONCURRENT_REQUESTS'] = 1
    settings.overrides['CONCURRENT_REQUESTS_PER_DOMAIN'] = 1
    settings.overrides['RANDOM_DOWNLOAD_DELAY'] = False

    fn_useragents = "utils/useragents.txt"
    fp_useragents = open(fn_useragents, 'rb')
    total_lines = len(fp_useragents.readlines())
    fp_useragents.close()

    if not os.path.isdir("data"):
        os.mkdir("data")
    fn_log = "data/log.txt"
    fp_log = open(fn_log, "ab+")

    def start_requests(self):
        urls = [
            'http://www.dangdang.com',
            'http://www.360buy.com',
            'http://www.amazon.com.cn',
            'http://www.taobao.com'
            ]

        for url in urls:
            ua = linecache.getline(Useragent.fn_useragents, randint(1, Useragent.total_lines)).strip()
            url_headers = {'User-Agent': ua}
            yield Request(url, callback=self.parse_origin, headers=url_headers)

    def parse_origin(self, response):
        current_url = response.url
        headers = response.request.headers

        data_log = current_url
        for k, v in headers.items():
            header = "%s\t%s" % (k, v)
            data_log = "\n".join((data_log, header))
        Useragent.fp_log.write("%s\n" % data_log)

UPDATE

I wrote another spider to see the effect of setting DOWNLOAD_DELAY; the following is the code:

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.conf import settings
import sys, os, time

reload(sys)
sys.setdefaultencoding('utf-8')

class TestCrawl(CrawlSpider):

    name = 'crawldelay'
    start_urls = [
        'http://www.dangdang.com',
        ]

    rules = (
        Rule(SgmlLinkExtractor(allow=('.+'),), callback="parse_origin"),
        )

    def __init__(self):
        CrawlSpider.__init__(self)
        if not os.path.isdir("data"):
            os.mkdir("data")
        self.fn_log = "data/log.txt"
        self.fp_log = open(self.fn_log, 'ab+')

        settings.overrides['DOWNLOAD_DELAY'] = 60
        settings.overrides['RANDOM_DOWNLOAD_DELAY'] = False
        settings.overrides['CONCURRENT_REQUESTS'] = 1
        settings.overrides['CONCURRENT_REQUESTS_PER_IP'] = 1

    def parse_origin(self, response):
        current_url = response.url
        data_log = "%s\n%s\n\n" % (current_url, time.asctime())
        self.fp_log.write(data_log)

The following is part of the log file I used to check the effect of DOWNLOAD_DELAY:

http://living.dangdang.com/furniture
Mon Aug 27 10:49:50 2012

http://static.dangdang.com/topic/744/200778.shtml
Mon Aug 27 10:49:50 2012

http://survey.dangdang.com/html/2389.html
Mon Aug 27 10:49:50 2012

http://fashion.dangdang.com/watch
Mon Aug 27 10:49:50 2012

https://login.dangdang.com/signin.aspx?returnurl=http://customer.dangdang.com/wishlist/
Mon Aug 27 10:49:50 2012

http://www.hd315.gov.cn/beian/view.asp?bianhao=010202001051000098
Mon Aug 27 10:49:51 2012

https://ss.cnnic.cn/verifyseal.dll?pa=2940051&sn=2010091900100002234
Mon Aug 27 10:49:51 2012

But it seemed that DOWNLOAD_DELAY had no obvious effect: many requests were logged within the same second.
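To quantify this rather than eyeballing the timestamps, the gaps can be computed from the log itself. A small sketch, assuming exactly the url/asctime pair format shown above:

import time

def request_gaps(fn="data/log.txt"):
    # time.strptime's default format parses asctime-style lines.
    stamps = [time.mktime(time.strptime(line.strip()))
              for line in open(fn)
              if line.strip() and not line.startswith("http")]
    return [b - a for a, b in zip(stamps, stamps[1:])]

For the excerpt above this returns mostly 0.0 and 1.0, nowhere near the configured 60-second delay.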

Upvotes: 1

Views: 2821

Answers (2)

kev

Reputation: 161604

This is caused by the way the DNS cache is implemented (resolution is deferred): CONCURRENT_REQUESTS_PER_IP only takes effect from the second request to a domain, once its IP is known. You can override the get() method of LocalCache so that it returns a fixed value, which makes Scrapy treat every request as going to the same IP.


from scrapy.utils.datatypes import LocalCache

# Make every DNS-cache lookup return the same dummy value, so the
# per-IP concurrency limit applies to all requests together.
LocalCache.get = lambda *args: 'fake-dummy-domain'

Test your spider:

scrapy crawl crawldelay -s CONCURRENT_REQUESTS_PER_IP=1 -s DOWNLOAD_DELAY=1
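The -s flag overrides a setting from the command line, so you can try different values without touching the project settings. Note that patching LocalCache.get reaches into a Scrapy internal, so it may stop working in other Scrapy versions.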

Upvotes: 1

Steven Almeroth

Reputation: 8192

Only attribute assignments and method definitions belong directly in a class body. If you want to run initialization code, you need to override __init__():

import os
import linecache
from random import randint

from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.conf import settings

class UseragentSpider(BaseSpider):

    name = 'useragent'
    fn_log = "data/log.txt"
    fn_useragents = "utils/useragents.txt"
    DOWNLOAD_DELAY = 5

    def __init__(self, name=None, **kwargs):
        settings.overrides['CONCURRENT_REQUESTS'] = 1
        settings.overrides['CONCURRENT_REQUESTS_PER_DOMAIN'] = 1
        settings.overrides['RANDOM_DOWNLOAD_DELAY'] = False

        fp_useragents = open(self.fn_useragents, 'rb')
        self.total_lines = len(fp_useragents.readlines())
        fp_useragents.close()

        if not os.path.isdir("data"):
            os.mkdir("data")
        self.fp_log = open(self.fn_log, "ab+")

        # remember to call BaseSpider __init__() since we're overriding it
        super(UseragentSpider, self).__init__(name, **kwargs)

    def start_requests(self):
        urls = ['http://www.dangdang.com',
                'http://www.360buy.com',
                'http://www.amazon.com.cn',
                'http://www.taobao.com',
            ]

        for url in urls:
            ua = linecache.getline(self.fn_useragents, randint(1, self.total_lines)).strip()
            url_headers = {'User-Agent': ua}
            yield Request(url, callback=self.parse_origin, headers=url_headers)

    def parse_origin(self, response):
        headers = response.request.headers
        data_log = response.url

        for k, v in headers.items():
            header = "%s\t%s" % (k, v)
            data_log = "\n".join((data_log, header))

        self.fp_log.write("%s\n" % data_log)
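Note that plain attribute assignments such as name, fn_useragents and DOWNLOAD_DELAY are fine where they are; it is only the statements with side effects (the settings.overrides calls and the file handling) that have to move into __init__(), where they run once per spider instance instead of at import time.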

Upvotes: 0
