Reputation: 7043
I'm making a Python script for personal use, but it's not working for Wikipedia...
This works:
import urllib2, sys
from bs4 import BeautifulSoup
site = "http://youtube.com"
page = urllib2.urlopen(site)
soup = BeautifulSoup(page)
print soup
This does not work:
import urllib2, sys
from bs4 import BeautifulSoup
site= "http://en.wikipedia.org/wiki/StackOverflow"
page = urllib2.urlopen(site)
soup = BeautifulSoup(page)
print soup
This is the error:
Traceback (most recent call last):
File "C:\Python27\wiki.py", line 5, in <module>
page = urllib2.urlopen(site)
File "C:\Python27\lib\urllib2.py", line 126, in urlopen
return _opener.open(url, data, timeout)
File "C:\Python27\lib\urllib2.py", line 406, in open
response = meth(req, response)
File "C:\Python27\lib\urllib2.py", line 519, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python27\lib\urllib2.py", line 444, in error
return self._call_chain(*args)
File "C:\Python27\lib\urllib2.py", line 378, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 527, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
HTTPError: HTTP Error 403: Forbidden
Upvotes: 22
Views: 59633
Reputation: 1806
You need to send a browser-like User-Agent header. Applied to the current (Python 2) code:
import urllib2, sys
from bs4 import BeautifulSoup
site = "http://en.wikipedia.org/wiki/StackOverflow"
hdr = {'User-Agent': 'Mozilla/5.0'}  # pretend to be a browser
req = urllib2.Request(site, headers=hdr)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
print soup
The same fix in Python 3:
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
site = "http://en.wikipedia.org/wiki/StackOverflow"
hdr = {'User-Agent': 'Mozilla/5.0'}  # browser-like User-Agent
req = Request(site, headers=hdr)
page = urlopen(req)
soup = BeautifulSoup(page, 'html.parser')
print(soup)
Or, using Selenium:
from selenium import webdriver
browser = webdriver.PhantomJS()
browser.get("http://en.wikipedia.org/wiki/StackOverflow")
assert "Stack Overflow - Wikipedia" in browser.title
The reason the modified version works is that Wikipedia checks whether the User-Agent belongs to a popular browser and answers unrecognized clients (such as the default Python-urllib agent) with 403 Forbidden.
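To see the check in action, here is a minimal Python 3 sketch comparing the two agents (assuming Wikipedia still rejects the default one, as it did in the question's traceback):
from urllib.request import Request, urlopen
from urllib.error import HTTPError

site = "http://en.wikipedia.org/wiki/StackOverflow"
try:
    urlopen(site)  # sent with the default "Python-urllib/3.x" agent
except HTTPError as e:
    print("default agent:", e.code)  # expected: 403

req = Request(site, headers={'User-Agent': 'Mozilla/5.0'})
print("browser-like agent:", urlopen(req).getcode())  # expected: 200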
Upvotes: 67
Reputation: 129
I used urllib3 instead.
import urllib3

resp = urllib3.request("GET", "your_URL")  # top-level request() helper, added in urllib3 v2
html = resp.data.decode('utf-8')
To install urllib3, just type this in Command Prompt:
pip install urllib3
This is for Python 3, of course.
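If the default urllib3 agent is rejected, a browser-like User-Agent can be passed as well; a sketch using the same top-level request() helper:
import urllib3

resp = urllib3.request(
    "GET",
    "your_URL",
    headers={"User-Agent": "Mozilla/5.0"},  # browser-like agent, as in the other answers
)
html = resp.data.decode("utf-8")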
Upvotes: 0
Reputation: 41
import re
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup

url = "your_url"
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
new_soup = soup(webpage, "lxml")
text = new_soup.get_text()
# collapse whitespace, then drop anything outside the allowed character set
text = re.sub(r"\n", " ", text)
text = re.sub(r"\t", " ", text)
text = re.sub(r"\s+", " ", text)
val = re.sub(r'[^a-zA-Z0-9@_,.$£+]', ' ', text).strip()
print(val)
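For illustration, here is what that normalization does to a small hypothetical string (not from the original answer):
import re

sample = "Stack\tOverflow\n  is   a  Q&A   site!"
text = re.sub(r"\s+", " ", sample)
print(re.sub(r'[^a-zA-Z0-9@_,.$£+]', ' ', text).strip())
# -> Stack Overflow is a Q A site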
Upvotes: 0
Reputation: 692
from urllib.request import urlopen, Request
from urllib.parse import urlparse
from bs4 import BeautifulSoup as soup

def checkURL(requested_url):
    # prepend a scheme if the caller omitted it
    if not urlparse(requested_url).scheme:
        requested_url = "https://" + requested_url
    return requested_url

def requestAndParse(requested_url):
    requested_url = checkURL(requested_url)
    try:
        # define headers to be provided for request authentication
        headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                                 'AppleWebKit/537.11 (KHTML, like Gecko) '
                                 'Chrome/23.0.1271.64 Safari/537.11',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                   'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
                   'Accept-Encoding': 'none',
                   'Accept-Language': 'en-US,en;q=0.8',
                   'Connection': 'keep-alive'}
        request_obj = Request(url=requested_url, headers=headers)
        opened_url = urlopen(request_obj)
        page_html = opened_url.read()
        opened_url.close()
        page_soup = soup(page_html, "html.parser")
        return page_soup, requested_url
    except Exception as e:
        print(e)
# Example:
page, url = requestAndParse("your_url")
Try out the above code snippet for loading pages with BeautifulSoup.
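One caveat: on failure the except branch only prints the error, so requestAndParse implicitly returns None and the tuple unpacking above raises a TypeError. A small guard (a sketch):
result = requestAndParse("en.wikipedia.org/wiki/StackOverflow")
if result is not None:
    page, url = result
    print(page.title)  # e.g. <title>Stack Overflow - Wikipedia</title>
else:
    print("request failed")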
Upvotes: 0
Reputation: 11
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup

url = 'yourlink'
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
page_soup = soup(webpage, "html.parser")
This worked for me; it should work for you too!
Upvotes: 0