scraper.py
# -*- coding: utf-8 -*-
# Scrapes the weekly cafeteria menu ("cardapio") from the UFSCar Sorocaba
# campus site and saves one record per meal period ("periodo") and date.
# Written for ScraperWiki Classic, i.e. Python 2 (note the byte-string
# .encode('utf-8').replace(...) accent normalisation below).
import scraperwiki
from pprint import pprint
from lxml.html import parse

tree = parse("http://www.sorocaba.ufscar.br/ufscar/?cardapio").getroot()

menu = []
skip = True
for table in tree.cssselect('table'):
    trs = table.cssselect('tr')
    tds = table.cssselect('td')
    # Only tables whose first cell mentions "Cardapio" contain menu data.
    if "Card" in tds[0].text_content():
        # Skip the very first matching table.
        if skip:
            skip = False
            continue
        # The first cell reads e.g. "Cardapio - <periodo>"; keep the part after "-".
        titulo = tds[0].text_content().split('-')[1].strip()
        # trs[2] holds the dates, one cell per weekday.
        datas = [el.text_content().strip() for el in trs[2]]
        # trs[3] holds the regular menu: in each cell the <p> elements repeat
        # in groups of three, dish type first and dish second, hence the
        # [::3] and [1::3] slices.
        for i, td in enumerate(trs[3].cssselect('td')):
            ps = td.findall('p')
            # Strip accents from the type labels so they make safe column names.
            tipos = [e.text_content().encode('utf-8').replace('ç', 'c').replace('ã', 'a') for e in ps[::3]]
            pratos = [e.text_content() for e in ps[1::3]]
            norm = dict(zip(tipos, pratos))
            dados = {
                'periodo': titulo,
                'data': datas[i],
            }
            dados.update(norm)
            menu.append(dados)
        # trs[5] holds the vegetarian options; merge them into the record
        # already created for the same period and date.
        for i, td in enumerate(trs[5].cssselect('td')):
            ps = td.findall('p')
            # The first <p> is the rice option and carries no type label.
            tipos = ['Arroz']
            pratos = [ps[0].text_content()]
            tipos += [e.text_content().encode('utf-8').replace('ç', 'c').replace('ã', 'a') + ' vegetariano'
                      for e in ps[2::3]]
            pratos += [e.text_content() for e in ps[3::3]]
            veg = dict(zip(tipos, pratos))
            dados = next(dado for dado in menu if dado['periodo'] == titulo and dado['data'] == datas[i])
            dados.update(veg)

# One row per (periodo, data); re-running the scraper updates existing rows.
for dados in menu:
    scraperwiki.sqlite.save(unique_keys=['periodo', 'data'], data=dados)

pprint(sorted(menu, key=lambda x: x['data']))
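
# --- Illustrative addition, not part of the original scraper -----------------
# A minimal sketch of reading the saved records back out of the ScraperWiki
# SQLite store for inspection. It assumes the library's default table name
# "swdata", which is where scraperwiki.sqlite.save() writes when no
# table_name is passed; adjust the query if a different table is used.
saved = scraperwiki.sqlite.select("* from swdata")
pprint(sorted(saved, key=lambda row: row['data']))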