Reputation: 89
I am a beginner with Python and Scrapy. I need help: I need to extract a product listing, but the site has a "view more" button at the end that fires an AJAX text/html request and loads another HTML fragment with new products.
import scrapy
from scrapy.http import Request


class ProdSpider(scrapy.Spider):
    name = "easy"
    allowed_domains = ["easy.com.ar"]
    start_urls = ["https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/AjaxCatalogSearchResultContentView?searchTermScope=&searchType=1002&filterTerm=&orderBy=&maxPrice=&showResultsPage=true&langId=-5&sType=SimpleSearch&metaData=&pageSize=12&manufacturer=&resultCatEntryType=&catalogId=10051&pageView=image&searchTerm=&minPrice=&categoryId=39652&storeId=10151&beginIndex=12"]

    beginIndex_index = 12

    def parse(self, response):
        SECTION_SELECTOR = '.thumb-product'
        for soar in response.css(SECTION_SELECTOR):
            Link = 'div.dojoDndItem a ::attr(href)'
            # Marca = 'p.brand a ::text'
            Nombre = 'div.thumb-name a ::text'
            # SKU = './/p[@class="sku"]/text()'
            Price = './/span[@id="tarj-mas-edit"]/text()'
            yield {
                'Link': soar.css(Link).extract_first(),
                # 'Marca': soar.css(Marca).extract_first(),
                'Nombre': soar.css(Nombre).re_first(r'\n\s*(.*)'),  # strips whitespace and special characters
                # 'SKU': soar.xpath(SKU).re_first(r'SKU:\s*(.*)'),
                'Price': soar.xpath(Price).re_first(r'\n\s*(.*)'),
            }

        # here, if no products are available, simply return; that exits
        # parse and ultimately stops the spider
        self.beginIndex_index += 12
        if beginIndex_index:
            yield Request(url="https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/AjaxCatalogSearchResultContentView?searchTermScope=&searchType=1002&filterTerm=&orderBy=&maxPrice=&showResultsPage=true&langId=-5&sType=SimpleSearch&metaData=&pageSize=12&manufacturer=&resultCatEntryType=&catalogId=10051&pageView=image&searchTerm=&minPrice=&categoryId=39652&storeId=10151&beginIndex=%s" % (self.beginIndex_index + 12),
                          callback=self.parse)
I tried the code above, but only 12 products get captured. The only parameter that changes in the URL is "beginIndex"; I want to keep adding 12 to it until the product listings run out. I'm stuck on this issue!
Thanks!
Upvotes: 2
Views: 858
Reputation: 151
You've almost got it!
I see in your URL that you have another parameter named pageSize. I tested it and the website allows you to set it to 50 at most.
To know when to stop, you can simply test whether the section selector matched any items before yielding another request:
import scrapy
from scrapy.http import Request
from scrapy import Selector


class ProdSpider(scrapy.Spider):
    name = "easy"
    allowed_domains = ["easy.com.ar"]
    url = "https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/AjaxCatalogSearchResultContentView?searchTermScope=&searchType=1002&filterTerm=&orderBy=&maxPrice=&showResultsPage=true&langId=-5&sType=SimpleSearch&metaData=&pageSize=50&manufacturer=&resultCatEntryType=&catalogId=10051&pageView=image&searchTerm=&minPrice=&categoryId=39652&storeId=10151&beginIndex={pagenum}"

    product_fields_xpath = {
        'Link': '//a[contains(@id, "CatalogEntry")]/@href',
        'Nombre': '//a[contains(@id, "CatalogEntry")]/text()',
        'Price': './/span[@class="thumb-price-e"]/text()'
    }

    section_selector = '//div[@class="thumb-product"]'
    begin_index = 0

    def start_requests(self):  # note: start_requests, not start_request
        yield Request(url=self.url.format(pagenum=self.begin_index), callback=self.parse)

    def parse(self, response):
        products = response.xpath(self.section_selector).extract()
        n_items = 0
        for product in products:
            n_items += 1
            # re-parse each product block so the field XPaths only see that product
            sel = Selector(text=product)
            item = dict()
            for k, v in self.product_fields_xpath.items():  # iteritems() is Python 2 only
                item[k] = sel.xpath(v).extract_first()
            yield item

        # request the next page only if the current one still contained products
        self.begin_index += 50
        if n_items > 0:
            yield Request(url=self.url.format(pagenum=self.begin_index), callback=self.parse)
I didn't test this code, but I hope you'll understand what I mean.
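As a side note, hand-editing that long query string is error-prone. Here is a minimal, untested sketch of the same pagination step using add_or_replace_parameter from w3lib (which is installed alongside Scrapy); the helper name next_page_url is my own invention, not part of either library:

from w3lib.url import add_or_replace_parameter

def next_page_url(current_url, begin_index):
    # Hypothetical helper: rewrite only the beginIndex query parameter
    # and leave every other parameter of the URL intact.
    return add_or_replace_parameter(current_url, 'beginIndex', str(begin_index))

Inside parse you would then yield Request(next_page_url(response.url, self.begin_index), callback=self.parse) instead of formatting the whole URL by hand.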
Upvotes: 1
Reputation: 2061
I suggest you use Selenium, so you can 'click' the "view more" button and load more data inside your spider. Here is a sample spider (I did not test it, but it shows the general idea):
import scrapy
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary


class ProdSpider(scrapy.Spider):
    name = "easy"
    allowed_domains = ["easy.com.ar"]
    start_urls = ["https://www.easy.com.ar/webapp/wcs/stores/servlet/es/easyar/search/aditivos-y-lubricantes"]

    def __init__(self):
        super(ProdSpider, self).__init__()
        binary = FirefoxBinary('C:/Program Files (x86)/Mozilla Firefox/firefox.exe')
        self.wb = webdriver.Firefox(firefox_binary=binary)

    def parse(self, response):
        self.wb.get(response.url)
        while True:
            # find_element_by_xpath raises NoSuchElementException when nothing
            # matches; the plural form returns an empty list instead, so the
            # loop can stop cleanly once the button disappears
            buttons = self.wb.find_elements_by_xpath('//*[@id="Search_Result_div"]/div[2]/div[9]/input')
            if not buttons:
                break
            buttons[0].click()
        # extract your data here...
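One thing this sketch glosses over: the click fires an AJAX request, so the new products are not in the DOM immediately. A minimal, untested sketch of an explicit wait after each click, assuming the products use the //div[@class="thumb-product"] markup from the other answer:

from selenium.webdriver.support.ui import WebDriverWait

# inside the while loop, replacing the bare buttons[0].click():
n_before = len(self.wb.find_elements_by_xpath('//div[@class="thumb-product"]'))
buttons[0].click()
# wait up to 10 seconds until the click has actually appended new products
WebDriverWait(self.wb, 10).until(
    lambda driver: len(driver.find_elements_by_xpath('//div[@class="thumb-product"]')) > n_before
)

If the last click loads nothing new, the wait raises a TimeoutException, which you could catch as another signal to leave the loop.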
Upvotes: 5