Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixes #472 #474 #476 Add News search in Baidu, Parsijoo & Mojeek #486

Merged
merged 1 commit into from
Jan 29, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions app/scrapers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ def feed_gen(query, engine, count=10, qtype=''):
engine = old_names.get(engine, engine)
if engine in ('quora', 'youtube'):
urls = scrapers[engine].search_without_count(query)
elif engine in ('baidu', 'parsijoo') and qtype == 'news':
urls = scrapers[engine].news_search(query, count, qtype)
elif engine == 'mojeek' and qtype == 'news':
urls = scrapers[engine].news_search_without_count(query)
elif engine in ('bing', 'parsijoo') and qtype == 'vid':
urls = scrapers[engine].video_search_without_count(query)
elif engine in ('bing', 'parsijoo') and qtype == 'isch':
Expand Down
17 changes: 17 additions & 0 deletions app/scrapers/baidu.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ class Baidu(Scraper):
def __init__(self):
Scraper.__init__(self)
self.url = 'https://www.baidu.com/s'
self.newsURL = 'http://news.baidu.com/ns'
self.defaultStart = 0
self.queryKey = 'wd'
self.startKey = 'pn'
Expand All @@ -28,3 +29,19 @@ def parse_response(soup):
print('Baidu parsed: ' + str(urls))

return urls

@staticmethod
def parse_news_response(soup):
    """ Parse a Baidu news results page.

    Args:
        soup: BeautifulSoup of the news.baidu.com response body.

    Returns: urls (list)
            [{'title': Title1, 'link': url1}, {'title': Title2, 'link': url2}, ...]
    """
    # Each news hit is an <h3 class="c-title"> wrapping a single anchor.
    urls = [{'title': h3.a.getText(), 'link': h3.a.get('href')}
            for h3 in soup.findAll('h3', {'class': 'c-title'})]

    print('Baidu parsed: ' + str(urls))

    return urls
50 changes: 50 additions & 0 deletions app/scrapers/generalized.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,21 @@ def get_page(self, query, startIndex=0, qtype=''):
url = self.imageURL
else:
url = self.url
elif qtype == 'news':
if self.name == 'baidu':
url = self.newsURL
payload = {'word': query, self.startKey: startIndex}
response = requests.get(
url, headers=self.headers, params=payload
)
return response
elif self.name == 'parsijoo':
url = self.newsURL
payload = {self.queryKey: query, 'page': startIndex}
response = requests.get(
url, headers=self.headers, params=payload
)
return response
payload = {self.queryKey: query, self.startKey: startIndex,
self.qtype: qtype}
response = requests.get(url, headers=self.headers, params=payload)
Expand Down Expand Up @@ -163,3 +178,38 @@ def image_search_without_count(self, query):
soup = BeautifulSoup(response.text, 'html.parser')
urls = self.parse_image_response(soup)
return urls

def news_search(self, query, num_results, qtype=''):
    """ Search news for the query, paging until num_results are collected.

    Args:
        query: search terms (str)
        num_results: maximum number of results to return (int)
        qtype: query type forwarded to get_page (str)

    Returns: urls (list)
            [{'title': Title1, 'link': url1}, ...] truncated to num_results
    """
    urls = []
    if self.name == 'parsijoo':
        # Parsijoo news results are paginated starting at page 1,
        # unlike its web search which uses a 0-based offset.
        current_start = self.newsStart
    else:
        current_start = self.defaultStart

    while len(urls) < num_results:
        response = self.get_page(query, current_start, qtype)
        soup = BeautifulSoup(response.text, 'html.parser')
        new_results = self.parse_news_response(soup)
        # parse_news_response returns [] (not None) when a page has no
        # hits; treat any falsy result as exhaustion so an empty page
        # cannot spin this loop forever.
        if not new_results:
            break
        urls.extend(new_results)
        current_start = self.next_start(current_start, new_results)
    return urls[: num_results]

def news_search_without_count(self, query):
    """ Single-request news search for engines without result paging.

    Only Mojeek is supported; the request is issued inside the engine
    check so that other engines cannot hit a NameError on the unbound
    url/payload variables and instead get an empty result list.

    Args:
        query: search terms (str)

    Returns: urls (list)
            [{'title': Title1, 'link': url1}, ...] ([] for other engines)
    """
    urls = []
    if self.name == 'mojeek':
        url = self.newsURL
        # 'fmt=news' switches Mojeek's standard search endpoint
        # into news mode.
        payload = {self.queryKey: query, 'fmt': 'news'}
        response = requests.get(url, headers=self.headers, params=payload)
        soup = BeautifulSoup(response.text, 'html.parser')
        urls = self.parse_news_response(soup)
    return urls
21 changes: 21 additions & 0 deletions app/scrapers/mojeek.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ class Mojeek(Scraper):
def __init__(self):
Scraper.__init__(self)
self.url = 'https://www.mojeek.co.uk/search'
self.newsURL = 'https://www.mojeek.co.uk/search'
self.defaultStart = 1
self.startKey = 's'
self.name = 'mojeek'
Expand All @@ -27,3 +28,23 @@ def parse_response(soup):
print('Mojeek parsed: ' + str(urls))

return urls

@staticmethod
def parse_news_response(soup):
    """ Parse a Mojeek news results page.

    Args:
        soup: BeautifulSoup of the Mojeek response body.

    Returns: urls (list)
            [{'title': Title1, 'link': url1}, {'title': Title2, 'link': url2}, ...]
    """
    # News results are anchors carrying the 'ob' class; the anchor
    # text is the headline and href is the article link.
    urls = [{'title': a.getText(), 'link': a.get('href')}
            for a in soup.findAll('a', attrs={'class': 'ob'})]

    print('Mojeek parsed: ' + str(urls))

    return urls
22 changes: 22 additions & 0 deletions app/scrapers/parsijoo.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
from __future__ import print_function
from .generalized import Scraper
try:
from urllib.parse import unquote # Python 3
except ImportError:
from urllib import unquote # Python 2


class Parsijoo(Scraper):
Expand All @@ -10,7 +14,9 @@ def __init__(self):
self.url = 'https://parsijoo.ir/web'
self.imageURL = 'https://image.parsijoo.ir/image'
self.videoURL = 'https://video.parsijoo.ir/video'
self.newsURL = 'http://khabar.parsijoo.ir/search/'
self.defaultStart = 0
self.newsStart = 1
self.startKey = 'co'
self.name = 'parsijoo'

Expand Down Expand Up @@ -71,3 +77,19 @@ def parse_image_response(soup):
print('Parsijoo parsed: ' + str(urls))

return urls

@staticmethod
def parse_news_response(soup):
    """ Parse a Parsijoo (khabar.parsijoo.ir) news results page.

    Args:
        soup: BeautifulSoup of the response body.

    Returns: urls (list)
            [{'title': Title1, 'link': url1}, {'title': Title2, 'link': url2}, ...]
    """
    urls = []
    for div in soup.findAll('div', {'class': 'news-title-link'}):
        title = div.a.getText()
        # hrefs come back percent-encoded (Persian text); decode them
        # so callers receive readable URLs.
        link = unquote(div.a.get('href'))
        urls.append({'title': title, 'link': link})

    print('Parsijoo parsed: ' + str(urls))

    return urls
12 changes: 7 additions & 5 deletions app/templates/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -98,14 +98,16 @@ <h1><code>query-server</code></h1>
<div id="type" class="btn-group btn-group-vertical" style="display:inline-flex;padding:0; margin: 0 auto;" data-toggle="buttons">
<label class=" active typeButton" style="padding:10px;">General<br/>
<input type="radio" name = "stype" value="" autocomplete="off" checked>
</label>
</label>
<label class=" typeButton" style="padding:10px;">Images<br/>
<input type="radio" name = "stype" value="isch" autocomplete="off">
</label>
<label class=" typeButton" style="padding:10px;">
Video<br/>
</label>
<label class=" typeButton" style="padding:10px;">Video<br/>
<input type="radio" name = "stype" value="vid" autocomplete="off">
</label>
</label>
<label class=" typeButton" style="padding:10px;">News<br/>
<input type="radio" name = "stype" value="news" autocomplete="off">
</label>
</div>
</div>
<div class="col-sm-2 col-xs-6">
Expand Down
13 changes: 13 additions & 0 deletions test/test_baidu.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,16 @@ def test_parse_response():
'link': u'mock_url'
}]
assert resp == expected_resp


def test_parse_news_response():
    """Baidu news parser turns a c-title heading into a title/link dict."""
    # Minimal fixture mirroring one Baidu news hit.
    html_text = """<h3 class="c-title">
    <a href="mock_url" target="_blank">mock_title</a>
    </h3>"""
    dummy_soup = BeautifulSoup(html_text, 'html.parser')
    resp = Baidu().parse_news_response(dummy_soup)
    expected_resp = [{
        'title': u'mock_title',
        'link': u'mock_url'
    }]
    assert resp == expected_resp
11 changes: 11 additions & 0 deletions test/test_mojeek.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,14 @@ def test_parse_response():
}]
resp = Mojeek().parse_response(dummy_soup)
assert resp == expected_resp


def test_parse_news_response():
    """Mojeek news parser turns an 'ob'-class anchor into a title/link dict."""
    # Minimal fixture mirroring one Mojeek news hit.
    html_text = '<a href="mock_url" class="ob">mock_title</a>'
    dummy_soup = BeautifulSoup(html_text, 'html.parser')
    expected_resp = [{
        'title': u'mock_title',
        'link': u'mock_url'
    }]
    resp = Mojeek().parse_news_response(dummy_soup)
    assert resp == expected_resp
13 changes: 13 additions & 0 deletions test/test_parsijoo.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,3 +45,16 @@ def test_parse_image_response():
}]
resp = Parsijoo().parse_image_response(dummy_soup)
assert resp == expected_resp


def test_parse_news_response():
    """Parsijoo news parser turns a news-title-link div into a title/link dict."""
    # Minimal fixture mirroring one Parsijoo news hit; href here needs
    # no percent-decoding, so unquote leaves it unchanged.
    html_text = """<div class="news-title-link">
    <a href="mock_url">mock_title</a>
    </div>"""
    dummy_soup = BeautifulSoup(html_text, 'html.parser')
    expected_resp = [{
        'title': u'mock_title',
        'link': u'mock_url'
    }]
    resp = Parsijoo().parse_news_response(dummy_soup)
    assert resp == expected_resp