# authors.py
# Scrape Google Scholar author pages and result links.
# Requires scholar.py (https://github.com/ckreibich/scholar.py), e.g.:
#   pip install -e git+https://github.com/ckreibich/scholar.py#egg=scholar
import csv
import sys

import pandas as pd

from SComplexity.scholar_scrape import scholar

# Python 3 only: str is already unicode; keep the old names for compatibility.
unicode = str  # pylint: disable-msg=W0622
encode = lambda s: unicode(s)  # pylint: disable-msg=C0103
def export_csv(querier, header=False, sep='|'):
    """Print and return each scraped article as a sep-delimited CSV row.

    Named export_csv so it does not shadow the stdlib csv module used below.
    """
    results = []
    for art in querier.articles:
        result = art.as_csv(header=header, sep=sep)
        results.append(result)
        print(encode(result))
        header = False  # only emit the header row once
    return results
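# Usage sketch (assumes a querier that has already run a query, as in
# search_author() below):
#   rows = export_csv(querier, header=True)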
import io
import os
import getopt
import re
import urllib.request
from io import StringIO

import requests
import bs4 as bs
from bs4 import BeautifulSoup
from delver import Crawler
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from pyvirtualdisplay import Display
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
import PyPDF2
from PyPDF2 import PdfFileReader
import textract
from SComplexity.crawl import collect_pubs

C = Crawler()

# Headless X display so selenium can drive a browser on a server.
display = Display(visible=0, size=(1024, 768))
display.start()
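# Cleanup sketch (an addition, not in the original, which never stops the
# display): register a handler so the virtual display shuts down on exit.
import atexit
atexit.register(display.stop)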
def html_to_author(content, link):
    """Write the author names found on a Scholar profile-search page to
    sample.csv and return the page's visible text."""
    soup = BeautifulSoup(content, 'html.parser')
    mydivs = soup.findAll("h3", {"class": "gsc_1usr_name"})
    with open('sample.csv', 'w', newline='') as outputFile:
        outputWriter = csv.writer(outputFile)
        for each in mydivs:
            for anchor in each.find_all('a'):
                print(anchor.text)
                outputWriter.writerow([anchor.text, link])
    # Strip HTML: remove script/style elements, then flatten to text.
    for script in soup(["script", "style"]):
        script.extract()  # rip it out
    text = soup.get_text()
    # Organize text: break into lines, strip leading/trailing space on each,
    # break multi-headlines into a line each, and drop blank lines.
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    str_text = '\n'.join(chunk for chunk in chunks if chunk)
    return str_text
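# Usage sketch (the query URL format is an assumption based on Scholar's
# author-search pages, not taken from the original):
#   url = ('https://scholar.google.com/citations'
#          '?view_op=search_authors&mauthors=R+Gerkin')
#   text = html_to_author(requests.get(url).content, url)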
def url_to_text(tuple_link):
    """Fetch one (index, link) result and return its text.

    HTML links go through html_to_author(); PDF links are read with PyPDF2.
    """
    index, link = tuple_link
    buff = None
    if 'pdf' not in link:
        if C.open(link) is not None:
            content = requests.get(link, stream=True).content
            buff = html_to_author(content, link)
            print(buff)
        else:
            print('problem opening {0}'.format(link))
    else:
        pdf_file = requests.get(link, stream=True)
        f = io.BytesIO(pdf_file.content)
        reader = PdfFileReader(f)
        buff = reader.getPage(0).extractText().split('\n')
    return buff
'''
Disabled: pickle each fetched buffer into results_dir (requires `import pickle`).
def buffer_to_pickle(link_tuple):
    se_b, page_rank, link, category, buff = link_tuple
    fname = 'results_dir/{0}_{1}_{2}.p'.format(category, se_b, page_rank)
    if buff is not None:
        with open(fname, 'wb') as f:
            pickle.dump(link_tuple, f)
    return
'''
def process(item):
    """Fetch one (index, link) tuple; pickling hook disabled above."""
    text = url_to_text(item)
    #buffer_to_pickle(text)
    return text
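# Batch sketch (an assumption, only hinted at by the commented-out calls
# below): map process() over enumerated links in parallel.
#   from multiprocessing import Pool
#   with Pool(4) as p:
#       p.map(process, enumerate(links))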
def search_author(author):
    """Query Google Scholar for an author and return the result URLs.

    Approach from https://github.com/ckreibich/scholar.py/issues/80.
    """
    querier = scholar.ScholarQuerier()
    settings = scholar.ScholarSettings()
    querier.apply_settings(settings)
    query = scholar.SearchScholarQuery()
    query.set_words('author:' + author)
    querier.send_query(query)
    #results0 = export_csv(querier)
    links = [a.attrs['url'][0] for a in querier.articles
             if a.attrs['url'][0] is not None]
    #[ process((index, l)) for index, l in enumerate(links) ]
    return links

if __name__ == '__main__':
    links = search_author('R Gerkin')
    print(links)
'''
Disabled: keyword-search variant. NUM_LINKS should cap the number of links
returned, but this path has not been refactored yet.
def search_scholar(get_links):
    # from https://github.com/ckreibich/scholar.py/issues/80
    se_b, page_rank, link, category, buff = get_links
    querier = scholar.ScholarQuerier()
    settings = scholar.ScholarSettings()
    querier.apply_settings(settings)
    query = scholar.SearchScholarQuery()
    query.set_words(category)
    querier.send_query(query)
    links = [ a.attrs['url'][0] for a in querier.articles if a.attrs['url'][0] is not None ]
    #if len(links) > NUM_LINKS: links = links[0:NUM_LINKS]
    [ process((se_b, page_rank, l, category, buff)) for page_rank, l in enumerate(links) ]
'''