27 changes: 27 additions & 0 deletions CPU-bound.py
@@ -0,0 +1,27 @@
from hashlib import md5
from random import choice
import concurrent.futures
from datetime import datetime
import os

def gen_coin(suffix):
    """Brute-force a random 50-digit string whose MD5 digest ends with `suffix`."""
    while True:
        s = "".join(choice("0123456789") for _ in range(50))
        h = md5(s.encode('utf8')).hexdigest()
        if h.endswith(suffix):
            return f"{s} {h}"

def main():
    # CPU-bound work scales with cores; more process workers than that only adds overhead.
    workers = os.cpu_count() or 4
    with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
        # One search per worker, each looking for a digest ending in "0000".
        for coin in executor.map(gen_coin, ["0000"] * workers):
            print(coin)

if __name__ == '__main__':
    start = datetime.now()
    main()
    end = datetime.now()
    print(end - start)
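For comparison, a sequential baseline makes the pool's speedup measurable. A minimal sketch reusing the same gen_coin logic, kept self-contained so it runs on its own; the number of searches mirrors the pooled version and is an illustrative assumption:

from hashlib import md5
from random import choice
from datetime import datetime
import os

def gen_coin(suffix):
    while True:
        s = "".join(choice("0123456789") for _ in range(50))
        h = md5(s.encode('utf8')).hexdigest()
        if h.endswith(suffix):
            return f"{s} {h}"

# Same number of searches as the pooled version, but run one after another.
start = datetime.now()
for _ in range(os.cpu_count() or 4):
    print(gen_coin("0000"))
print(datetime.now() - start)

Since each search is independent CPU work, the pooled version should run several times faster on a multi-core machine, while this loop pays the full sum of all search times.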
24 changes: 24 additions & 0 deletions IO-bound.py
@@ -0,0 +1,24 @@
import urllib.request
import concurrent.futures

# One URL per line; skip blanks left by a trailing newline.
with open('res.txt', encoding='utf8') as f:
    links = [line for line in f.read().splitlines() if line]


def load_url(url, timeout):
    with urllib.request.urlopen(url, timeout=timeout) as conn:
        return conn.read()


# Threads suit I/O-bound work: each worker spends most of its time waiting on the network.
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
    future_to_url = {executor.submit(load_url, url, 5): url for url in links}
    for future in concurrent.futures.as_completed(future_to_url):
        url = future_to_url[future]
        try:
            data = future.result()
        except Exception as exc:
            print(url, exc)
        else:
            print(url, len(data), 'bytes')
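The thread pool pays off here because each download mostly waits on the network. For comparison, a sequential sketch of the same loop, assuming the same res.txt produced by get_text.py:

import urllib.request

with open('res.txt', encoding='utf8') as f:
    links = [line for line in f.read().splitlines() if line]

# Each request blocks until it finishes, so total time is the sum of all requests.
for url in links:
    try:
        with urllib.request.urlopen(url, timeout=5) as conn:
            data = conn.read()
    except Exception as exc:
        print(url, exc)
    else:
        print(url, len(data), 'bytes')

With 100 threads the wall time tends toward the slowest single request (plus any timeouts) rather than the sum of all of them.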
17 changes: 17 additions & 0 deletions get_text.py
@@ -0,0 +1,17 @@
from urllib.request import urlopen
from bs4 import BeautifulSoup
from tqdm import tqdm

# Wikipedia's "Special:Random" page on the Russian wiki; every request returns a random article.
url = 'https://ru.wikipedia.org/wiki/%D0%A1%D0%BB%D1%83%D0%B6%D0%B5%D0%B1%D0%BD%D0%B0%D1%8F:%D0%A1%D0%BB%D1%83%D1%87%D0%B0%D0%B9%D0%BD%D0%B0%D1%8F_%D1%81%D1%82%D1%80%D0%B0%D0%BD%D0%B8%D1%86%D0%B0'

# Visit 100 random articles and collect their external (non-wiki) links into res.txt.
with open('res.txt', 'w', encoding='utf8') as res:
    for _ in tqdm(range(100)):
        html = urlopen(url).read().decode('utf8')
        soup = BeautifulSoup(html, 'html.parser')

        for link in soup.find_all('a'):
            href = link.get('href')
            if href and href.startswith('http') and 'wiki' not in href:
                print(href, file=res)
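Running order: get_text.py writes the res.txt that IO-bound.py reads, so run it first; CPU-bound.py takes no input and can be run on its own.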