-
-
Notifications
You must be signed in to change notification settings - Fork 416
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Atualiza raspadores para Pau dos Ferros-RN e Paulínia-SP (#1159)
- Loading branch information
Showing
3 changed files
with
20 additions
and
55 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
11 changes: 11 additions & 0 deletions
11
data_collection/gazette/spiders/rn/rn_pau_dos_ferros_2022.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
from datetime import date | ||
|
||
from gazette.spiders.base.adiarios_v1 import BaseAdiariosV1Spider | ||
|
||
|
||
class RnPauDosFerrosSpider(BaseAdiariosV1Spider):
    """Spider for the official gazette of Pau dos Ferros-RN.

    All crawling logic lives in BaseAdiariosV1Spider; this subclass only
    supplies the municipality-specific configuration attributes.
    """

    # IBGE territory code for Pau dos Ferros-RN.
    TERRITORY_ID = "2409407"
    name = "rn_pau_dos_ferros_2022"
    allowed_domains = ["paudosferros.rn.gov.br"]
    BASE_URL = "https://www.paudosferros.rn.gov.br"
    # Earliest edition available on this platform.
    start_date = date(2022, 9, 28)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,58 +1,11 @@ | ||
import datetime | ||
from datetime import date | ||
|
||
import scrapy | ||
from gazette.spiders.base.instar import BaseInstarSpider | ||
|
||
from gazette.items import Gazette | ||
from gazette.spiders.base import BaseGazetteSpider | ||
|
||
|
||
class SpPauliniaSpider(BaseInstarSpider):
    """Spider for the official gazette of Paulínia-SP.

    This commit migrated the spider from a hand-rolled BaseGazetteSpider
    implementation (manual ASP.NET form navigation and HTML parsing) to the
    shared Instar platform base spider; all crawling logic now lives in
    BaseInstarSpider and this subclass only supplies configuration.
    """

    # IBGE territory code for Paulínia-SP.
    TERRITORY_ID = "3536505"
    name = "sp_paulinia"
    allowed_domains = ["paulinia.sp.gov.br"]
    # Gazette listing page on the Instar-hosted municipal portal.
    base_url = "https://www.paulinia.sp.gov.br/portal/diario-oficial"
    # Earliest edition published on the portal.
    start_date = date(2012, 1, 4)