Reputation: 2881
I have implemented a crawler that reads URLs from a text file, scrapes all of them, and then stops.
My implementation:
class CoreSpider(scrapy.Spider):
    name = "final"
    custom_settings = {
        'ROBOTSTXT_OBEY': 'False',
        'HTTPCACHE_ENABLED': 'True',
        'LOG_ENABLED': 'False',
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            'random_useragent.RandomUserAgentMiddleware': 320
        },
    }

    def __init__(self):
        self.all_ngrams = get_ngrams()
        # logging.DEBUG(self.all_ngrams)
        self.search_term = ""
        self.start_urls = self.read_url()
        self.rules = (
            Rule(LinkExtractor(unique=True), callback='parse', follow=True, process_request='process_request'),
        )
    .....
    .....
I run this spider from a script as below:
process = CrawlerProcess(get_project_settings())
process.crawl(CoreSpider)
process.start()
It gives the error
twisted.internet.error.ReactorNotRestartable
once it has finished scraping all the URLs.
I tried using CrawlerRunner
as in the implementation below, and it gives the same error as before.
runner = CrawlerRunner(get_project_settings())
d = runner.crawl(CoreSpider)
d.addBoth(lambda _: reactor.stop())
reactor.run() # the script will block here until the crawling is finished
Then I tried running the spider like this:
runner = CrawlerRunner(get_project_settings())
@defer.inlineCallbacks
def crawl():
    yield runner.crawl(CoreSpider)
    reactor.stop()
crawl()
reactor.run()
But it still gives the same error.
How do I manually stop the spider once all the URLs have been scraped?
Update: Python 2.7 Stack Trace
Traceback (most recent call last):
File "seed_list_generator.py", line 768, in <module>
process = CrawlerProcess(get_project_settings())
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/crawler.py", line 243, in __init__
super(CrawlerProcess, self).__init__(settings)
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/crawler.py", line 134, in __init__
self.spider_loader = _get_spider_loader(settings)
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/crawler.py", line 330, in _get_spider_loader
return loader_cls.from_settings(settings.frozencopy())
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/spiderloader.py", line 61, in from_settings
return cls(settings)
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/spiderloader.py", line 25, in __init__
self._load_all_spiders()
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/spiderloader.py", line 47, in _load_all_spiders
for module in walk_modules(name):
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/utils/misc.py", line 71, in walk_modules
submod = import_module(fullpath)
File "/root/anaconda2/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/root/Public/company_profiler/profiler/spiders/run_spider.py", line 12, in <module>
process.start()
File "/root/anaconda2/lib/python2.7/site-packages/scrapy/crawler.py", line 285, in start
reactor.run(installSignalHandlers=False) # blocking call
File "/root/anaconda2/lib/python2.7/site-packages/twisted/internet/base.py", line 1242, in run
self.startRunning(installSignalHandlers=installSignalHandlers)
File "/root/anaconda2/lib/python2.7/site-packages/twisted/internet/base.py", line 1222, in startRunning
ReactorBase.startRunning(self)
File "/root/anaconda2/lib/python2.7/site-packages/twisted/internet/base.py", line 730, in startRunning
raise error.ReactorNotRestartable()
twisted.internet.error.ReactorNotRestartable
Python 3.6 Stack Trace:
File "seed_list_generator.py", line 769, in <module>
process = CrawlerProcess(get_project_settings())
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/crawler.py", line 249, in __init__
super(CrawlerProcess, self).__init__(settings)
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/crawler.py", line 137, in __init__
self.spider_loader = _get_spider_loader(settings)
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/crawler.py", line 336, in _get_spider_loader
return loader_cls.from_settings(settings.frozencopy())
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/spiderloader.py", line 61, in from_settings
return cls(settings)
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/spiderloader.py", line 25, in __init__
self._load_all_spiders()
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/spiderloader.py", line 47, in _load_all_spiders
for module in walk_modules(name):
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/utils/misc.py", line 71, in walk_modules
submod = import_module(fullpath)
File "/root/anaconda3/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 950, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 655, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "/root/Public/company_profiler/profiler/spiders/run_spider.py", line 12, in <module>
process.start()
File "/root/anaconda3/lib/python3.6/site-packages/scrapy/crawler.py", line 291, in start
reactor.run(installSignalHandlers=False) # blocking call
File "/root/anaconda3/lib/python3.6/site-packages/twisted/internet/base.py", line 1242, in run
self.startRunning(installSignalHandlers=installSignalHandlers)
File "/root/anaconda3/lib/python3.6/site-packages/twisted/internet/base.py", line 1222, in startRunning
ReactorBase.startRunning(self)
File "/root/anaconda3/lib/python3.6/site-packages/twisted/internet/base.py", line 730, in startRunning
raise error.ReactorNotRestartable()
twisted.internet.error.ReactorNotRestartable
Thanks in advance.
Upvotes: 1
Views: 2426
Reputation: 2881
Finally, I managed to stop the crawler by putting it inside an if __name__ == "__main__"
block.
if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())
    process.crawl(CoreSpider)
    process.start()
It stops the crawler gracefully once it finishes scraping all the URLs.
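Looking at the tracebacks in the question, a likely explanation is that run_spider.py lives inside the spiders/ package, so Scrapy's spider loader imports it (via walk_modules) while CrawlerProcess is being constructed. That import re-executes process.start() at module level, so the reactor ends up being started more than once in the same process, which raises ReactorNotRestartable (Twisted's reactor cannot be restarted once it has stopped). Guarding the startup code with if __name__ == "__main__" keeps it from running on import. A minimal standalone run script along these lines (the file name and the spider's import path are assumptions, not taken from the question) might look like:

# run_crawl.py -- hypothetical script, kept outside the spiders/ package
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from profiler.spiders.final import CoreSpider  # assumed import path for the spider

if __name__ == '__main__':
    # Only runs when executed directly, never when the module is imported
    # by Scrapy's spider loader.
    process = CrawlerProcess(get_project_settings())
    process.crawl(CoreSpider)
    process.start()  # blocks until the crawl finishes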
Upvotes: 1
Reputation: 11
What happens when you change your code like this?
class CoreSpider(scrapy.Spider):
    name = "final"
    custom_settings = {
        'ROBOTSTXT_OBEY': 'False',
        'HTTPCACHE_ENABLED': 'True',
        'LOG_ENABLED': 'False',
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
            'random_useragent.RandomUserAgentMiddleware': 320
        },
    }

    def __init__(self, *args, **kwargs):
        # Python 3
        super().__init__(*args, **kwargs)
        # Python 2
        # super(CoreSpider, self).__init__(*args, **kwargs)
        self.all_ngrams = get_ngrams()
        # logging.DEBUG(self.all_ngrams)
        self.search_term = ""
        self.start_urls = self.read_url()
        self.rules = (
            Rule(LinkExtractor(unique=True), callback='parse', follow=True, process_request='process_request'),
        )
    .....
    .....
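One plausible reason this could help (hedged, not spelled out above): without the call to the base class's __init__, Scrapy's own Spider initialisation never runs, including its handling of keyword arguments passed in from the launching script. With super().__init__(*args, **kwargs) in place, arguments given to crawl() become spider attributes. A small illustration, where max_pages is a made-up argument name and the import path is assumed:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from profiler.spiders.final import CoreSpider  # assumed import path for the spider

if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())
    # Keyword arguments to crawl() are forwarded to the spider's constructor;
    # the base Spider.__init__ turns them into attributes (self.max_pages here).
    process.crawl(CoreSpider, max_pages="50")
    process.start()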
Upvotes: 1