Reputation: 7746
I have Scrapy code that looks like this:
for row in response.css("div#flexBox_flex_calendar_mainCal table tr.calendar_row"):
    print "================"
    print row.xpath(".//td[@class='time']/text()").extract()
    print row.xpath(".//td[@class='currency']/text()").extract()
    print row.xpath(".//td[@class='impact']/span/@title").extract()
    print row.xpath(".//td[@class='event']/span/text()").extract()
    print row.xpath(".//td[@class='actual']/text()").extract()
    print row.xpath(".//td[@class='forecast']/text()").extract()
    print row.xpath(".//td[@class='previous']/text()").extract()
    print "================"
I am able to get the same data using pure Python like this:
from lxml import html
import requests
page = requests.get('http://www.forexfactory.com/calendar.php?day=dec1.2011')
tree = html.fromstring(page.text)
print tree.xpath(".//td[@class='time']/text()")
print tree.xpath(".//td[@class='currency']/text()")
print tree.xpath(".//td[@class='impact']/span/@title")
print tree.xpath(".//td[@class='event']/span/text()")
print tree.xpath(".//td[@class='actual']/text()")
print tree.xpath(".//td[@class='forecast']/text()")
print tree.xpath(".//td[@class='previous']/text()")
However, I need to do this row by row. My first attempt at porting to lxml doesn't work:
from lxml import html
import requests

page = requests.get('http://www.forexfactory.com/calendar.php?day=dec1.2011')
tree = html.fromstring(page.text)
for row in tree.css("div#flexBox_flex_calendar_mainCal table tr.calendar_row"):
    print row.xpath(".//td[@class='time']/text()")
    print row.xpath(".//td[@class='currency']/text()")
    print row.xpath(".//td[@class='impact']/span/@title")
    print row.xpath(".//td[@class='event']/span/text()")
    print row.xpath(".//td[@class='actual']/text()")
    print row.xpath(".//td[@class='forecast']/text()")
    print row.xpath(".//td[@class='previous']/text()")
What is the correct way to port this scrapy code to pure lxml?
EDIT: I have gotten a little closer. I can see a table{} object; I just don't know how to walk it.
import urllib2
from lxml import etree
#import requests

def wgetUrl(target):
    try:
        req = urllib2.Request(target)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
        response = urllib2.urlopen(req)
        outtxt = response.read()
        response.close()
    except:
        return ''
    return outtxt

url = 'http://www.forexfactory.com/calendar.php?day='
date = 'dec1.2011'
data = wgetUrl(url + date)
parser = etree.HTMLParser()
tree = etree.fromstring(data, parser)

for elem in tree.xpath("//div[@id='flexBox_flex_calendar_mainCal']"):
    print elem[0].tag, elem[0].attrib, elem[0].text
    # elem[1] is where the table is
    print elem[1].tag, elem[1].attrib, elem[1].text
    print elem[1]
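For what it's worth, an lxml element can be iterated directly over its child elements, so something like the following walks the table (a rough sketch, assuming elem[1] really is the <table> and the rows are its direct children rather than nested in a <tbody>):

table = elem[1]
for tr in table:           # child elements of <table> are the rows
    print tr.tag, tr.get('class')
    for td in tr:          # child elements of <tr> are the cells
        print ' ', td.tag, td.get('class'), td.text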
Upvotes: 4
Views: 1433
Reputation: 6070
I like to use lxml for scraping. I usually do not use its xpath functionality though, and opt for its ElementPath library instead; the syntax is very similar. Below is how I would port your Scrapy code.
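As an aside: the reason tree.css fails in your attempt is that lxml elements have no css method. The closest analogue to Scrapy's response.css is lxml's cssselect method, which requires the separate cssselect package. A minimal sketch, assuming that package is installed:

from lxml import html
import requests

page = requests.get('http://www.forexfactory.com/calendar.php?day=dec1.2011')
tree = html.fromstring(page.text)
# cssselect() accepts the same selector string response.css() did
for row in tree.cssselect("div#flexBox_flex_calendar_mainCal table tr.calendar_row"):
    print row.xpath(".//td[@class='time']/text()")

That said, here is the ElementPath port.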
Going line by line:
initialization:
from lxml import etree

# analogous to xpath('.../text()').extract() for lxml etree nodes
def extract_text(elem):
    if elem is None:
        return None
    else:
        return ''.join(i for i in elem.itertext())

data = wgetUrl(url + date)  # wgetUrl, url, date as you defined them in your question
tree = etree.HTML(data)
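One behavioral note: itertext() joins the text of the element and all its descendants, whereas XPath's /text() returns only the element's direct text nodes. A small illustration with hypothetical markup, just to show the difference:

frag = etree.HTML('<td class="time">2:30<span>am</span></td>')
td = frag.find('.//td')
print td.xpath('text()')   # ['2:30']  -- direct text only
print extract_text(td)     # '2:30am'  -- includes descendant text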
line 1
# original
for row in response.css("div#flexBox_flex_calendar_mainCal table tr.calendar_row"):
# ported
for row in tree.findall(r'.//div[@id="flexBox_flex_calendar_mainCal"]//table/tr[@class="calendar_row"]'):
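Note that table/tr is safe here because lxml's HTML parser, unlike a browser's DOM, does not insert implicit <tbody> elements, so the <tr> rows stay direct children of the <table> (unless the page itself includes a <tbody>).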
line 2
print "================"
line 3
# original
print row.xpath(".//td[@class='time']/text()").extract()
# ported
print extract_text(row.find(r'.//td[@class="time"]'))
line 4
# original
print row.xpath(".//td[@class='currency']/text()").extract()
# ported
print extract_text(row.find(r'.//td[@class="currency"]'))
line 5
# original
print row.xpath(".//td[@class='impact']/span/@title").extract()
# ported
td = row.find(r'.//td[@class="impact"]/span')
if td is not None and 'title' in td.attrib:
    print td.attrib['title']
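A slightly terser equivalent, if you prefer: Element.get() returns None when the attribute is missing, so the attrib check can fold away. A minimal sketch:

span = row.find(r'.//td[@class="impact"]/span')
if span is not None:
    print span.get('title')   # get() returns None if the attribute is absent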
line 6
# original
print row.xpath(".//td[@class='event']/span/text()").extract()
# ported
print extract_text(row.find(r'.//td[@class="event"]/span'))
line 7
# original
print row.xpath(".//td[@class='actual']/text()").extract()
# ported
print extract_text(row.find(r'.//td[@class="actual"]'))
line 8
# original
print row.xpath(".//td[@class='forecast']/text()").extract()
# ported
print extract_text(row.find(r'.//td[@class="forecast"]'))
line 9
# original
print row.xpath(".//td[@class='previous']/text()").extract()
# ported
print extract_text(row.find(r'.//td[@class="previous"]'))
line 10
print "================"
And all together now:
from lxml import etree

def wgetUrl(target):
    # same as you defined it
    pass

# analogous to xpath('.../text()').extract() for lxml etree nodes
def extract_text(elem):
    if elem is None:
        return None
    else:
        return ''.join(i for i in elem.itertext())

content = wgetUrl(your_url)  # wgetUrl as the function you defined in your question
node = etree.HTML(content)

for row in node.findall(r'.//div[@id="flexBox_flex_calendar_mainCal"]//table/tr[@class="calendar_row"]'):
    print "================"
    print extract_text(row.find(r'.//td[@class="time"]'))
    print extract_text(row.find(r'.//td[@class="currency"]'))
    td = row.find(r'.//td[@class="impact"]/span')
    if td is not None and 'title' in td.attrib:
        print td.attrib['title']
    print extract_text(row.find(r'.//td[@class="event"]/span'))
    print extract_text(row.find(r'.//td[@class="actual"]'))
    print extract_text(row.find(r'.//td[@class="forecast"]'))
    print extract_text(row.find(r'.//td[@class="previous"]'))
    print "================"
Upvotes: 5