-
Notifications
You must be signed in to change notification settings - Fork 0
/
crawl_search.py
52 lines (42 loc) · 1.4 KB
/
crawl_search.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import requests
def crawl_search(keyword, num):
    """Scrape the YouTube search-results page for *keyword*.

    Parses the raw HTML of https://www.youtube.com/results (the embedded
    result JSON) with plain string searches — no HTML/JSON parser is used.

    Args:
        keyword: Search query string (appended to the URL as-is).
        num: Maximum number of results to return.

    Returns:
        A list of up to *num* dicts with keys 'idx', 'title', 'uploader',
        'url', 'thumbnail_url', 'video_id'. 'uploader' and 'thumbnail_url'
        are always empty strings — this scraper does not extract them.

    NOTE(review): depends on YouTube's current page markup; if the markup
    changes, this silently returns fewer (or zero) results.
    """
    # Marker that precedes a result's "/watch..." url in the page JSON.
    url_marker = '"commandMetadata":{"webCommandMetadata":{"url":"/watch'
    # Accessibility label that follows a result's title runs; its text is
    # what this scraper records as the "title".
    label_marker = '"}],"accessibility":{"accessibilityData":{"label":"'
    base = 'https://youtube.com'

    html = requests.get(
        'https://www.youtube.com/results?search_query=' + keyword).text

    pairs = []  # (title, url) in page order
    pos = 0
    while True:
        # --- title: text between the label marker and its closing quote.
        # (The original measured the closing-quote offset from the start of
        # the whole buffer, truncating titles to a near-random length.)
        t_start = html.find(label_marker, pos)
        if t_start == -1:
            break
        t_start += len(label_marker)
        t_end = html.find('"', t_start)
        if t_end == -1:
            break
        title = html[t_start:t_end]

        # --- url: the "/watch?v=..." path inside the commandMetadata that
        # follows. Back up len('/watch') so the path keeps its prefix.
        u_start = html.find(url_marker, t_end)
        if u_start == -1:
            break
        u_start += len(url_marker) - len('/watch')
        # Stop at the closing quote or at a JSON escape (\u0026 query
        # params) — replaces the original's hard-coded 68-char slice,
        # which only worked because video IDs are currently 11 chars.
        u_end = html.find('"', u_start)
        esc = html.find('\\', u_start)
        if u_end == -1:
            break
        if esc != -1 and esc < u_end:
            u_end = esc
        pairs.append((title, base + html[u_start:u_end]))
        pos = u_end

    # De-duplicate by url while PRESERVING page order and the title<->url
    # pairing. The original ran list(set(...)) over titles and urls
    # independently, which scrambled order and mismatched pairs.
    seen = set()
    results = []
    for title, link in pairs:
        if link in seen:
            continue
        seen.add(link)
        results.append({
            'idx': len(results) + 1,
            'title': title,
            'uploader': '',        # not extracted by this scraper
            'url': link,
            'thumbnail_url': '',   # not extracted by this scraper
            'video_id': link.split('=')[-1],
        })
        if len(results) == num:
            break
    return results
# print(crawl_search('한로로', 5))