-
Notifications
You must be signed in to change notification settings - Fork 0
/
scraper.py
82 lines (64 loc) · 2.85 KB
/
scraper.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Request headers: an English locale plus a non-empty user-agent string so
# Metacritic does not reject the request outright.
headers = {"Accept-Language": "en-US, en;q=0.5", 'user-agent': 'xasd'}
# First browse page, used only to discover how many pages exist in total.
url = "https://www.metacritic.com/browse/games/score/metascore/all/all/filtered?page=0"
# timeout prevents the script from hanging forever if the server stalls.
result = requests.get(url, headers=headers, timeout=30)
soup = BeautifulSoup(result.text, "html.parser")
# The last pagination item carries the total page count as its link text.
num_pages = int(soup.find('li', class_='page last_page').a.text)
def scraper(pages, head):
    """Scrape Metacritic's all-games browse listing into a DataFrame.

    Parameters
    ----------
    pages : int
        Number of browse pages to fetch (pages 0 .. pages-1).
    head : dict
        HTTP headers passed to every request (locale + user-agent).

    Returns
    -------
    pandas.DataFrame
        One row per game with columns: name_game, meta_score, user_score,
        platform, release_date, description.
    """
    columns = [
        'name_game',
        'meta_score',
        'user_score',
        'platform',
        'release_date',
        'description',
    ]
    # Collect one DataFrame per page and concatenate once at the end.
    # (DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.)
    frames = []
    for num_page in range(pages):
        page_url = f"https://www.metacritic.com/browse/games/score/metascore/all/all/filtered?page={num_page}"
        # timeout prevents an indefinite hang on a stalled connection.
        results = requests.get(page_url, headers=head, timeout=30)
        soups = BeautifulSoup(results.text, "html.parser")
        # Each game on the listing page lives in one summary table cell.
        summaries = soups.find_all('td', class_='clamp-summary-wrap')
        name = []
        meta_score = []
        user_score = []
        platform = []
        release_date = []
        description = []
        for x in summaries:
            # Game title.
            name.append(x.find('a', class_='title').h3.text)
            # First metascore anchor holds the critic (meta) score.
            name_anchor = x.find('a', class_='metascore_anchor')
            meta_score.append(name_anchor.div.text)
            # Third metascore anchor holds the user score.
            user_score.append(x.find_all('a', class_='metascore_anchor')[2].div.text)
            # Platform label, whitespace-trimmed.
            platform.append(x.find('span', class_='data').text.strip())
            # Detail spans: index 2 is the release date.
            details = x.find('div', class_='clamp-details').find_all('span')
            release_date.append(details[2].text)
            # Short marketing description of the game.
            description.append(x.find('div', class_='summary').text.strip())
        frames.append(pd.DataFrame({
            'name_game': name,
            'meta_score': meta_score,
            'user_score': user_score,
            'platform': platform,
            'release_date': release_date,
            'description': description,
        }))
    if not frames:
        # Zero pages requested: return an empty frame with the full schema.
        return pd.DataFrame(columns=columns)
    return pd.concat(frames, ignore_index=True)
# Scrape every listing page and persist the full dataset as CSV.
games = scraper(num_pages, headers)
# Create the output directory first; to_csv raises FileNotFoundError
# if 'datasets/' does not already exist.
os.makedirs('datasets', exist_ok=True)
games.to_csv('datasets/dataset_games.csv')