AppliedResearcher
AppliedResearcher

Reputation: 63

How to crawl multiple URLs from csv using Selenium and Scrapy

I am currently trying to crawl multiple sites from https://blogabet.com/. At the moment, I have a "urls.txt" file which includes two URLs: 1. http://sabobic.blogabet.com 2. http://dedi22.blogabet.com

The problem I have is the following: Selenium opens each of the two URLs one after the other in the same tab. Thereby, it is just crawling the content of the second URL in my "urls.txt" file twice. It is not crawling any content from the first URL.

I think there is a problem with the for-loop and how the "parse_tip"-function is called. This is my code:

import csv
import re
from time import sleep

import scrapy
from scrapy import Spider
from scrapy.http import HtmlResponse, Request
from scrapy.selector import Selector

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys

class AlltipsSpider(Spider):
    """Scrape usernames and publish dates from blogabet.com tip feeds.

    Selenium drives a real browser so the JavaScript-rendered feed is
    present; the rendered page source is then parsed with Scrapy selectors.
    """
    name = 'alltips'
    allowed_domains = ['blogabet.com']
    # start_urls is not defined, so Scrapy calls start_requests() instead;
    # it must yield (or return) the items/requests for the crawl.

    def start_requests(self):
        # Raw string so the backslashes in the Windows path are kept literally.
        self.driver = webdriver.Chrome(r'C:\webdrivers\chromedriver.exe')
        with open("urls.txt", "rt") as f:
            start_urls = [url.strip() for url in f.readlines()]
            for url in start_urls:
                self.driver.get(url)
                self.driver.find_element_by_id('currentTab').click()
                sleep(3)
                self.logger.info('Sleeping for 3 sec.')
                self.driver.find_element_by_xpath('//*[@id="_blog-menu"]/div[2]/div/div[2]/a[3]').click()
                sleep(7)
                self.logger.info('Sleeping for 7 sec.')
                # BUG FIX: previously this yielded Request(url, callback=self.parse_tip),
                # which made Scrapy re-download the URL *later* — by then the driver
                # had already navigated to the next URL, so parse_tip (which read
                # self.driver.page_source) always scraped the last page. Capture the
                # rendered HTML now and hand it to parse_tip directly instead.
                page = HtmlResponse(url=url,
                                    body=self.driver.page_source,
                                    encoding='utf-8')
                yield from self.parse_tip(page)
        # Release the browser once every URL has been processed.
        self.driver.quit()

    def parse_tip(self, response):
        """Yield one {'Username', 'Publish date'} dict per post in *response*."""
        allposts = response.xpath('//*[@class="block media _feedPick feed-pick"]')

        for post in allposts:
            username = post.xpath('.//div[@class="col-sm-7 col-lg-6 no-padding"]/a/@title').extract()
            publish_date = post.xpath('.//*[@class="bet-age text-muted"]/text()').extract()

            yield {'Username': username,
                   'Publish date': publish_date
                   }

Upvotes: 1

Views: 1227

Answers (1)

Umair Ayub
Umair Ayub

Reputation: 21341

Why are you doing another request with `yield Request(url, callback=self.parse_tip)` when you already have the rendered page from Selenium? Just pass that page source text to parse_tip and use the text inside it:

class AlltipsSpider(Spider):
    """Render each blogabet page with Selenium, then parse the rendered
    HTML with Scrapy selectors — no second download of the same URL."""

    name = 'alltips'
    allowed_domains = ['blogabet.com']

    def start_requests(self):
        self.driver = webdriver.Chrome('C:\webdrivers\chromedriver.exe')
        with open("urls.txt", "rt") as f:
            # One target URL per line; strip trailing newlines/whitespace.
            targets = [line.strip() for line in f.readlines()]
            for target in targets:
                self.driver.get(target)
                self.driver.find_element_by_id('currentTab').click()
                sleep(3)
                self.logger.info('Sleeping for 5 sec.')
                self.driver.find_element_by_xpath('//*[@id="_blog-menu"]/div[2]/div/div[2]/a[3]').click()
                sleep(7)
                self.logger.info('Sleeping for 7 sec.')
                # The browser already holds the rendered page, so feed its
                # source straight into the parser instead of re-requesting.
                yield from self.parse_tip(text=self.driver.page_source)

    def parse_tip(self, text):
        """Yield a dict of username and publish date for every feed post."""
        posts = Selector(text=text).xpath('//*[@class="block media _feedPick feed-pick"]')

        for entry in posts:
            item = {
                'Username': entry.xpath('.//div[@class="col-sm-7 col-lg-6 no-padding"]/a/@title').extract(),
                'Publish date': entry.xpath('.//*[@class="bet-age text-muted"]/text()').extract(),
            }
            yield item

Upvotes: 1

Related Questions