user13018871

Python image scraping

As part of my thesis I am trying to build a large training corpus for deep learning.

My code runs on Python 3.7 (Conda) and works well until I try to grab more than 80 images. In my case I need a couple of hundred pictures.

Python code

import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
import requests
from bs4 import BeautifulSoup
import time
import io
import os
from PIL import Image
import hashlib

def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver.Chrome, sleep_between_interactions: float = 1):
    def scroll_to_end(wd):
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(sleep_between_interactions)

    # Build the Google query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"

    # Load the page
    wd.get(search_url.format(q=query))

    image_urls = set()
    image_count = 0
    results_start = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)

        # Get all image thumbnail results
        thumbnail_results = wd.find_elements_by_css_selector("img.Q4LuWd")
        number_results = len(thumbnail_results)

        print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")

        for img in thumbnail_results[results_start:number_results]:
            # Try to click every thumbnail such that we can get the real image behind it
            try:
                img.click()
                time.sleep(sleep_between_interactions)
            except Exception:
                continue

            # Extract image URLs
            actual_images = wd.find_elements_by_css_selector('img.n3VNCb')
            for actual_image in actual_images:
                if actual_image.get_attribute('src') and 'http' in actual_image.get_attribute('src'):
                    image_urls.add(actual_image.get_attribute('src'))

            image_count = len(image_urls)

            if len(image_urls) >= max_links_to_fetch:
                print(f"Found: {len(image_urls)} image links, done!")
                break
        else:
            print("Found:", len(image_urls), "image links, looking for more...")
            time.sleep(1)
            return
            load_more_button = wd.find_element_by_css_selector(".mye4qd")
            if load_more_button:
                wd.execute_script("document.querySelector('.mye4qd').click();")

        # Move the result startpoint further down
        results_start = len(thumbnail_results)

    return image_urls

def persist_image(folder_path:str, url:str):
    try:
        image_content = requests.get(url).content

    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")
        return  # without this, image_content would be undefined in the block below

    try:
        image_file = io.BytesIO(image_content)
        image = Image.open(image_file).convert('RGB')
        file_path = os.path.join(folder_path, hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
        with open(file_path, 'wb') as f:
            image.save(f, "JPEG", quality=85)
        print(f"SUCCESS - saved {url} - as {file_path}")
    except Exception as e:
        print(f"ERROR - Could not save {url} - {e}")

# As soon as the number of images is over 80, an error is shown
def search_and_download(search_term:str, target_path='./images', number_images=170):
    target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))

    if not os.path.exists(target_folder):
        os.makedirs(target_folder)

    with webdriver.Chrome() as wd:
        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)

    for elem in res:
        persist_image(target_folder, elem)

# Change here to modify the search query

search_term = 'Hecht'

search_and_download(
    search_term = search_term,
)

And the error in the log:

Found: 93 image links, looking for more ...
Traceback (most recent call last):
  File "C:\Users\User\Desktop\Scraping\image-gathering-selenium\scrapy2.py", line 103, in <module>
    search_term = search_term,
  File "C:\Users\User\Desktop\Scraping\image-gathering-selenium\scrapy2.py", line 94, in search_and_download
    for elem in res:
TypeError: 'NoneType' object is not iterable

Upvotes: 1

Views: 1014

Answers (2)

Ahmad Girach

Reputation: 96

You're trying to iterate over res, but it's None, which is why Python raises TypeError: 'NoneType' object is not iterable.

Add an if condition:

if res:
    for elem in res:
       persist_image(target_folder, elem)

Or add an empty-list fallback on that line:

res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5) or []
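
Applied to your script, search_and_download then reads like this (a minimal sketch of your existing function with the fallback added, nothing else changed):

def search_and_download(search_term: str, target_path='./images', number_images=170):
    target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))

    if not os.path.exists(target_folder):
        os.makedirs(target_folder)

    with webdriver.Chrome() as wd:
        # "or []" turns a None result into an empty list, so the loop below never sees None
        res = fetch_image_urls(search_term, number_images, wd=wd,
                               sleep_between_interactions=0.5) or []

    for elem in res:
        persist_image(target_folder, elem)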

Upvotes: 2

ali shibli

Reputation: 57

Try removing the return. Google lazy-loads only the first batch of thumbnails; more appear only after the "Show more results" button (.mye4qd) is clicked. Your early return exits fetch_image_urls before that click is ever reached, so once the first batch is exhausted the function ends without a value, i.e. it returns None:

    else:
        print("Found:", len(image_urls), "image links, looking for more ...")
        time.sleep(1)
        # return
        load_more_button = wd.find_element_by_css_selector(".mye4qd")
        if load_more_button:
            wd.execute_script("document.querySelector('.mye4qd').click();")
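
With the return gone, the while loop keeps scrolling and clicking "Show more results" until max_links_to_fetch is reached, and the function always exits through its final return image_urls. One caveat: find_element_by_css_selector raises NoSuchElementException when the button is missing, so the if check never actually sees a falsy value. A slightly more defensive variant (a sketch, since Google's markup changes over time) uses the plural find_elements, which returns a possibly empty list instead of raising:

    else:
        print("Found:", len(image_urls), "image links, looking for more ...")
        time.sleep(1)
        # find_elements returns [] instead of raising when the button is absent
        load_more_buttons = wd.find_elements_by_css_selector(".mye4qd")
        if load_more_buttons:
            wd.execute_script("document.querySelector('.mye4qd').click();")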

Upvotes: 0
