Shantanu Bedajna
Shantanu Bedajna

Reputation: 581

Python Scrapy keep getting same page link from next page button

i am trying to scrape amazon.com for the link of products that has more than 800 reviews but i keep getting the same page link from the next page button it keeps returning page 2 over and over again where i should get page 3,4 and so on

I HAVE SET A IF CONDITION TO SPILT AND CONVERT REVIEW STRING LIKE 1,020 TO INTEGER AND COMPARE IF GREATER THAN 800 OR NOT THEN BASED ON THAT VISIT THE PAGE

here is the code

# -*- coding: utf-8 -*-
import scrapy
from amazon.items import AmazonItem
from urlparse import urljoin


class AmazonspiderSpider(scrapy.Spider):
    name = "amazonspider"
    DOWNLOAD_DELAY = 1
    start_urls = ['https://www.amazon.com/s/ref=lp_165993011_nr_n_0?fst=as%3Aoff&rh=n%3A165793011%2Cn%3A%21165795011%2Cn%3A165993011%2Cn%3A2514571011&bbn=165993011&ie=UTF8&qid=1493778423&rnid=165993011']


    def parse(self, response):


        SET_SELECTOR = '.a-carousel-card.acswidget-carousel__card'
        for attr in response.css(SET_SELECTOR):
            #print '\n\n', attr

            item = AmazonItem()

            review_selector = './/*[@class="acs_product-rating__review-count"]/text()'
            link_selector = './/*[@class="a-link-normal"]/@href'

            if attr.xpath(review_selector).extract_first():
                if int(''.join(attr.xpath(review_selector).extract_first().split(','))) >= 800:
                    url = urljoin(response.url, attr.xpath(link_selector).extract_first())
                    item['LINKS'] = url
                    if url:
                        yield scrapy.Request(url, callback=self.parse_link, meta={'item': item})  


            next_page = './/span[@class="pagnRA"]/a[@id="pagnNextLink"]/@href'
            next_page = response.xpath(next_page).extract_first()
            print '\n\n', urljoin(response.url, next_page)
            if next_page:
                yield scrapy.Request(
                    urljoin(response.url, next_page),
                    callback=self.parse
                )
    def parse_link(self, response):

        item = AmazonItem(response.meta['item'])

        catselector = '.cat-link ::text'
        defaultcatselector = '.nav-search-label ::text'
        cat = response.css(catselector).extract_first()
        if cat:
            item['CATAGORY'] = cat
        else:
            item['CATAGORY'] = response.css(defaultcatselector).extract_first()
        return item

here is the output when i was printing the next page link before calling the parse function recursively

here and here

and here is the screenshot from the next page selector of the page here where am i going wrong ?

Upvotes: 1

Views: 1018

Answers (1)

CK Chen
CK Chen

Reputation: 664

Move the next page code block outside the loop.

class AmazonspiderSpider(scrapy.Spider):
name = "amazonspider"
DOWNLOAD_DELAY = 1
start_urls = ['https://www.amazon.com/s/ref=lp_165993011_nr_n_0?fst=as%3Aoff&rh=n%3A165793011%2Cn%3A%21165795011%2Cn%3A165993011%2Cn%3A2514571011&bbn=165993011&ie=UTF8&qid=1493778423&rnid=165993011']


def parse(self, response):


    SET_SELECTOR = '.a-carousel-card.acswidget-carousel__card'
    for attr in response.css(SET_SELECTOR):
        #print '\n\n', attr


        review_selector = './/*[@class="acs_product-rating__review-count"]/text()'
        link_selector = './/*[@class="a-link-normal"]/@href'

        if attr.xpath(review_selector).extract_first():
            if int(''.join(attr.xpath(review_selector).extract_first().split(','))) >= 800:
                url = urljoin(response.url, attr.xpath(link_selector).extract_first())


   next_page = './/span[@class="pagnRA"]/a[@id="pagnNextLink"]/@href'
   next_page = response.xpath(next_page).extract_first()
   print '\n\n', urljoin(response.url, next_page)

   if next_page:
       yield scrapy.Request(
           urljoin(response.url, next_page),
           callback=self.parse
       )

Upvotes: 2

Related Questions