이훈석

Reputation: 45

Only Indeed pages work well when I run these files

I made a job scraper with Python!

When I run the code, the Indeed pages scrape very well, but the SO (Stack Overflow) pages don't.

The SO scraper works only for page 0.

For example: SO scrapes only page 0, while Indeed scrapes pages 0, 1, 2, ..., 15.

What is the problem??

Repl.it link: https://repl.it/@timedosenotwait/Python-Scraper#main.py

Indeed.py

import requests
from bs4 import BeautifulSoup

LIMIT = 50
URL = f"https://kr.indeed.com/%EC%B7%A8%EC%97%85?as_and=python&as_phr=&as_any=&as_not=&as_ttl=&as_cmp=&jt=all&st=&salary=&radius=25&l=&fromage=any&limit={LIMIT}&sort=&psf=advsrch&from=advancedsearch"


def get_last_page():
    result = requests.get(URL)
    soup = BeautifulSoup(result.text, "html.parser")
    pagination = soup.find("div", {"class": "pagination"})
    links = pagination.find_all('a')
    pages = []
    for link in links[:-1]:
        pages.append(int(link.string))
    max_page = pages[-1]
    return max_page


def extract_job(html):
    title = html.find("h2", {"class": "title"}).find("a")["title"]
    company = html.find("span", {"class": "company"})
    company_anchor = company.find("a")
    if company_anchor is not None:
        company = company_anchor.string
    else:
        company = company.string
    location = html.find("div", {"class": "recJobLoc"})["data-rc-loc"]
    job_id = html["data-jk"]
    return {
        'title': title,
        'company': company,
        'location': location,
        "link": f"https://kr.indeed.com/viewjob?jk={job_id} "
    }


def extract_jobs(last_page):
    jobs = []
    for page in range(last_page):
        print(f"Scraping Indeed: Page {page}")
        result = requests.get(f"{URL}&start={page*LIMIT}")
        soup = BeautifulSoup(result.text, "html.parser")
        results = soup.find_all("div", {"class": "jobsearch-SerpJobCard"})

        for result in results:
            job = extract_job(result)
            jobs.append(job)
    return jobs


def get_jobs():
    last_page = get_last_page()
    jobs = extract_jobs(last_page)
    return jobs

StackOverflow.py

I think there is an error in this file...

import requests
from bs4 import BeautifulSoup

URL = f"https://stackoverflow.com/jobs?q=python"


def get_last_page():
    result = requests.get(URL)
    soup = BeautifulSoup(result.text, "html.parser")
    pages = soup.find("div", {"class": "s-pagination"}).find_all("a")
    last_page = pages[-2].get_text(strip=True)
    return int(last_page)


def extract_job(html):
    title = html.find("h2", {"class": "mb4"}).find("a")["title"]
    company, location = html.find("h3", {"class": "fc-black-700"}).find_all("span", recursive=False)
    company = company.get_text(strip=True)
    location = location.get_text(strip=True)
    job_id = html['data-jobid']
    return {
        'title': title,
        'company': company,
        'location': location,
        'apply-link': f"https://stackoverflow.com/jobs/{job_id}"
    }


# get_text(strip=True).strip("-").strip("\r").strip("\n") -> how to strip multiple options

def extract_jobs(last_page):
    jobs = []
    for page in range(last_page):
        print(f"Scraping SO: Page {page}")
        result = requests.get(f"{URL}&pg={page+1}")
        soup = BeautifulSoup(result.text, "html.parser")
        results = soup.find_all("div", {"class": "-job"})
        for result in results:
            job = extract_job(result)
            jobs.append(job)
        return jobs


def get_jobs():
    last_page = get_last_page()
    jobs = extract_jobs(last_page)
    return jobs

Upvotes: 0

Views: 100

Answers (1)

goalie1998

Reputation: 1442

Easy fix: in extract_jobs, your return statement is inside the first for loop, so the function returns after scraping only the first page. Move it out.
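
Applied to the extract_jobs in your StackOverflow.py (same code, only the return dedented), it would look something like this:

def extract_jobs(last_page):
    jobs = []
    for page in range(last_page):
        print(f"Scraping SO: Page {page}")
        result = requests.get(f"{URL}&pg={page+1}")
        soup = BeautifulSoup(result.text, "html.parser")
        results = soup.find_all("div", {"class": "-job"})
        for result in results:
            job = extract_job(result)
            jobs.append(job)
    return jobs  # runs only after every page has been scraped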

Upvotes: 1
