helena
helena

Reputation: 39

How do I make a request to a new URL?

I already have this code, which a friend helped me with before. I can already get all the product links on the site. Now I want to get each product's name, brand ("merk"), price, picture, description, and link. The product's description only appears if we click through to the product page.

I'm a beginner in Python.

from bs4 import BeautifulSoup
import urllib.request


# Start at listing page 1; the site paginates via the `p` query parameter,
# which is filled into the template with the % operator below.
count = 1
url = "https://www.sociolla.com/155-foundation?p=%d"

def get_url(url):
    """Open *url* over HTTP and return the response object."""
    return urllib.request.urlopen(urllib.request.Request(url))

# Fetch the first listing page. The site redirects out-of-range page
# numbers back to an existing page, so the while-loop below stops as soon
# as the response URL no longer matches the URL we asked for.
expected_url = url % count
response = get_url(expected_url)

# Parallel lists, one per scraped field (filled in by the commented-out
# CSV export further down).
link = []
name = []
merk = []
price = []
pic = []
description = []

while response.url == expected_url:
    soup = BeautifulSoup(response.read(), "html.parser")
    products = soup.find("div", {"id": "product-list-grid"})
    # Bug fix: the original wrapped find_all in `for i in products`, which
    # re-ran the identical search once per child node of the grid and left
    # `data` undefined (NameError) when the grid was missing. A single
    # find_all call, guarded against a missing grid, is sufficient.
    data = products.find_all("div", {"class": "product-item"}) if products is not None else []
    for item in data:
        link.append(item["data-eec-href"])

    # Advance to the next listing page.
    count += 1
    expected_url = url % count
    response = get_url(expected_url)


print(len(link))

"""
import csv
dataset=zip(link, merk, name, pic, price, description)    
with open("foundation_sociolla.csv","w", newline='') as csvfile:
    writer=csv.writer(csvfile)
    header=['link', 'merk', 'name', 'pic', 'price', 'description']
    writer.writerow(header)
    writer.writerows(dataset)
"""

Upvotes: 0

Views: 119

Answers (1)

Dan-Dev
Dan-Dev

Reputation: 9430

You need to make a request to the URL. Parse the content of that request and extract the data you want.

from bs4 import BeautifulSoup
import urllib.request

# Page counter and URL template; `p` selects the listing page number and is
# substituted in with the % operator.
count = 1
url = "https://www.sociolla.com/155-foundation?p=%d"


def get_url(url):
    """Fetch *url* and return the open HTTP response."""
    return urllib.request.urlopen(urllib.request.Request(url))

# First listing page. The site redirects out-of-range page numbers, which
# is how the while-loop below knows when to stop: the response URL stops
# matching the URL that was requested.
expected_url = url % count
response = get_url(expected_url)

# One parallel list per scraped field.
link, name, make, price, pic, description = [], [], [], [], [], []

while response.url == expected_url:
    listing = BeautifulSoup(response.read(), "html.parser")
    for product in listing.select("div.product-item"):
        product_url = product['data-eec-href']
        link.append(product_url)

        # The description only exists on the product's own page, so follow
        # the link and parse that page as well.
        detail = BeautifulSoup(get_url(product_url).read(), "html.parser")
        product_pic = detail.select('img#bigpic')[0]['src']
        pic.append(product_pic)
        product_price = detail.select('span#our_price_display')[0].text.strip()
        price.append(product_price)
        product_name = detail.select('div.detail-product-logo p')[0].text.strip()
        name.append(product_name)
        product_make = detail.select('div.detail-product-logo h3')[0].text.strip()
        make.append(product_make)
        product_description = detail.select('div#Details article')[0].text.strip()
        description.append(product_description)

        print(product_url, product_pic, product_price, product_name, product_make, product_description)

    # Advance to the next listing page.
    count += 1
    expected_url = url % count
    response = get_url(expected_url)

But if you're going to scrape a lot of pages, you are much better off using something like Scrapy: https://scrapy.org/

Upvotes: 1

Related Questions