# Import Splinter, BeautifulSoup, and Pandas
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
import datetime as dt
from webdriver_manager.chrome import ChromeDriverManager

def scrape_all():
    # Initiate headless driver for deployment
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=True)
    news_title, news_paragraph = mars_news(browser)

    # Run all scraping functions and store results in a dictionary
    data = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image": featured_image(browser),
        "facts": mars_facts(),
        "last_modified": dt.datetime.now(),
        "hemisphere": mars_hemis(browser)
    }

    # Stop webdriver and return data
    browser.quit()
    return data

def mars_news(browser):
    # Scrape Mars news: visit the mars nasa news site
    url = 'https://data-class-mars.s3.amazonaws.com/Mars/index.html'
    browser.visit(url)

    # Optional delay for loading the page
    browser.is_element_present_by_css('div.list_text', wait_time=1)

    # Convert the browser html to a soup object
    html = browser.html
    news_soup = soup(html, 'html.parser')

    # Add try/except for error handling
    try:
        slide_elem = news_soup.select_one('div.list_text')
        # Use the parent element to find the title and save it as 'news_title'
        news_title = slide_elem.find('div', class_='content_title').get_text()
        # Use the parent element to find the paragraph text
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        return None, None

    return news_title, news_p

def featured_image(browser):
    # Visit URL
    url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
    browser.visit(url)

    # Find and click the full image button
    full_image_elem = browser.find_by_tag('button')[1]
    full_image_elem.click()

    # Parse the resulting html with soup
    html = browser.html
    img_soup = soup(html, 'html.parser')

    # Add try/except for error handling
    try:
        # Find the relative image url
        img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        return None

    # Use the base url to create an absolute url
    img_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{img_url_rel}'

    return img_url

def mars_facts():
    # Add try/except for error handling
    try:
        # Use 'read_html' to scrape the facts table into a dataframe
        df = pd.read_html('https://data-class-mars-facts.s3.amazonaws.com/Mars_Facts/index.html')[0]
    except BaseException:
        return None

    # Assign columns and set index of dataframe
    df.columns = ['Description', 'Mars', 'Earth']
    df.set_index('Description', inplace=True)

    # Convert dataframe into HTML format, add bootstrap
    return df.to_html(classes="table table-striped")

def mars_hemis(browser):
    # Visit the Mars hemispheres site
    url = 'https://marshemispheres.com/'
    browser.visit(url)

    # Hold the title and image url for each hemisphere
    hemisphere_image_urls = []

    # Loop through the four hemisphere links
    for hemis in range(4):
        # Click the link for this hemisphere and parse the resulting page
        browser.links.find_by_partial_text('Hemisphere')[hemis].click()
        html = browser.html
        hemi_soup = soup(html, 'html.parser')

        # Scrape the title and the full-resolution image url
        title = hemi_soup.find('h2', class_='title').text
        img_url = hemi_soup.find('li').a.get('href')

        # Store the results in a dictionary and add it to the list
        hemispheres = {}
        hemispheres['img_url'] = f'https://marshemispheres.com/{img_url}'
        hemispheres['title'] = title
        hemisphere_image_urls.append(hemispheres)

        # Navigate back to the index page for the next hemisphere
        browser.back()

    return hemisphere_image_urls
if __name__ == "__main__":
# If running as script, print scraped data
print(scrape_all())
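
# Hypothetical usage sketch (not part of the original module): scrape_all()
# returns a plain dictionary, so a caller could import this module and read
# the fields directly. The key names below come from the dictionary built in
# scrape_all(); everything else here is illustrative.
#
#   from scraping import scrape_all
#
#   data = scrape_all()
#   print(data["news_title"])
#   print(data["featured_image"])
#   for hemisphere in data["hemisphere"]:
#       print(hemisphere["title"], "->", hemisphere["img_url"])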