Reputation: 99
Can a fellow scraper recommend how to get around 403 errors with Scrapy?
I've tried using Selenium, but to no avail. I've also tried passing the following header in with each request:
headers= {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}
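For reference, I attached that header roughly like this (a sketch; the spider below is the current version without it):

headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0'}

# In parse(): attach the header to the follow-up request explicitly.
request = response.follow(link, self.parseCourses, headers=headers)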
# -*- coding: utf-8 -*-
import scrapy
import re
import os
import wget


class LivelinguaCrawlerSpider(scrapy.Spider):
    name = 'livelingua_crawler'
    allowed_domains = ['www.livelingua.com']
    start_urls = ['https://www.livelingua.com/project/']

    def parse(self, response):
        # Each language tile links to a page listing that language's courses.
        language_links = response.css("div.col-md-4 a::attr(href)").getall()
        for link in language_links[2:4]:
            # Pull the language name out of the part of the URL after "courses".
            language = re.match('(.*)(?<=courses)(.*)', link).group(2)[1:-1]
            dir_path = "redacted/" + language
            try:
                os.makedirs(dir_path)
            except FileExistsError:
                pass
            self.dir_path = dir_path
            request = response.follow(link, self.parseCourses)
            request.meta['dir'] = dir_path
            yield request

    def parseCourses(self, response):
        courses = response.css("span.thumb-info-caption h6 a::attr(href)").getall()
        dir_path = response.meta['dir']
        for course in courses:
            request = response.follow(course, self.parseEBooks)
            request.meta['dir'] = dir_path
            yield request

    def parseEBooks(self, response):
        eBooks = response.css("div.row:first-child ul li a::attr(href)").getall()
        for eBook in eBooks:
            # This is the download that fails with the 403.
            wget.download(eBook, response.meta['dir'])
            yield {"ebook": eBook}
I get the following error when wget tries to download a file:
    wget.download(eBook, response.meta['dir'])
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/wget.py", line 526, in download
    (tmpfile, headers) = ulib.urlretrieve(binurl, tmpfile, callback)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 247, in urlretrieve
    with contextlib.closing(urlopen(url, data)) as fp:
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 222, in urlopen
    return opener.open(url, data, timeout)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 531, in open
    response = meth(req, response)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 641, in http_response
    'http', request, response, code, msg, hdrs)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 569, in error
    return self._call_chain(*args)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 503, in _call_chain
    result = func(*args)
  File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/urllib/request.py", line 649, in http_error_default
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
Upvotes: 0
Views: 1426
Reputation: 2116
As suggested by gangabass, using a files pipeline is the way to go here. This can be done pretty easily:
Enable the files pipeline and tell it which item field contains the URLs to download in settings.py:
ITEM_PIPELINES = {'scrapy.pipelines.files.FilesPipeline': 1}
FILES_URLS_FIELD = 'ebook'
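For the pipeline to actually store anything, FILES_STORE also needs to point at a directory (the path below is just an example; without this setting the pipeline stays disabled):

FILES_STORE = '/path/to/downloaded/ebooks'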
The error code you currently get when trying to download is probably related to the request headers, as Roland Weber said. To get around this problem, I would suggest creating a class that inherits from FilesPipeline and overrides the get_media_requests method:
from scrapy import Request
from scrapy.pipelines.files import FilesPipeline


class DownloadEbooksPipeline(FilesPipeline):
    # Browser-like headers so the file server doesn't reject the download with a 403.
    headers = {
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-GB,en;q=0.9,nl-BE;q=0.8,nl;q=0.7,ro-RO;q=0.6,ro;q=0.5,en-US;q=0.4',
    }

    def get_media_requests(self, item, info):
        # Build a download request with the custom headers for every URL in the item.
        for ebook_url in item.get(self.files_urls_field, []):
            yield Request(url=ebook_url, headers=self.headers)
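One thing to watch out for: get_media_requests iterates over the files URLs field, so the spider should yield the URLs as a list rather than a bare string (otherwise the string is iterated character by character). With the spider from the question, that would mean something along these lines:

def parseEBooks(self, response):
    eBooks = response.css("div.row:first-child ul li a::attr(href)").getall()
    # Yield the whole list of URLs; the pipeline downloads them with the custom headers.
    yield {"ebook": eBooks}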
If you do it this way, you will of course have to change ITEM_PIPELINES in settings.py so that it points to this class instead of the default FilesPipeline.
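Assuming the class lives in your project's pipelines.py (the module path below is just a placeholder for your own), that would look like:

ITEM_PIPELINES = {'yourproject.pipelines.DownloadEbooksPipeline': 1}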
Upvotes: 1