6 changes: 3 additions & 3 deletions .gitignore
@@ -1,5 +1,5 @@
 # file: ~/.gitignore_global
 .DS_Store
-.idea
-src/*
-tests/*
+.idea/
+venv
+__pycache__/
4 changes: 2 additions & 2 deletions README.md
@@ -1,2 +1,2 @@
-# Hello world
-This is repository for OTUS course.
+# DDT in testing API
+This is the fourth task from the OTUS course.
Empty file added auto_list_of_curls/__init__.py
Empty file.
50 changes: 50 additions & 0 deletions auto_list_of_curls/parsing.py
@@ -0,0 +1,50 @@
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin


class ParsingPage:
    def __init__(self, base_url):
        self.base_url = base_url

    def extract(self, pages, css_selector, base_url=None, attr="href"):
        data = []
        if base_url is None:
            base_url = self.base_url
        elements = pages.select(css_selector)
        for element in elements:
            if attr in element.attrs:
                content = element[attr]
            else:
                content = element.text.strip()  # fall back to the element's text when the attribute is absent

            if base_url and content:
                content = urljoin(base_url, content)  # resolve relative links against the base URL

            data.append(content)
        return data

    @staticmethod
    def create_page(link):
        try:
            response = requests.get(link)
            response.raise_for_status()
            return BeautifulSoup(response.text, 'html.parser')
        except requests.exceptions.HTTPError as error:
            print(f"Error: {error.response.status_code} - {error.response.reason}")
        except requests.exceptions.RequestException as error:
            print(f"Error: {error}")
        return None

    @staticmethod
    def fetch_content_list(url):
        try:
            response = requests.get(url)
            response.raise_for_status()  # raise if the server returned an error status
            data = response.json()  # parse the response body as JSON
            return data
        except requests.exceptions.HTTPError as http_err:
            print(f"HTTP error occurred: {http_err}")  # HTTP errors (e.g. 404, 501, etc.)
        except ValueError as json_err:
            print(f"JSON parsing error occurred: {json_err}")  # the body was not valid JSON
        return None
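ParsingPage bundles three helpers: create_page fetches a URL and returns a BeautifulSoup tree (or None on failure), extract pulls an attribute (or the text) from elements matching a CSS selector and resolves relative links against a base URL, and fetch_content_list GETs a JSON endpoint. A minimal usage sketch; the URLs and selector below are illustrative assumptions, not part of this change:

from auto_list_of_curls.parsing import ParsingPage

parser = ParsingPage("https://example.com")  # hypothetical base URL
soup = ParsingPage.create_page("https://example.com/docs")  # hypothetical page
if soup:
    links = parser.extract(soup, "a.nav-link")  # hypothetical selector; returns absolute hrefs
    print(links)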
Empty file added dog_api/__init__.py
Empty file.
15 changes: 15 additions & 0 deletions dog_api/take_curls.py
@@ -0,0 +1,15 @@
from auto_list_of_curls.parsing import ParsingPage

parser = ParsingPage("https://dog.ceo")
HTML = ParsingPage.create_page("https://dog.ceo/dog-api/documentation/")

data = []
urls = parser.extract(HTML, 'ul.endpoints-list li a', base_url='https://dog.ceo')

for url in urls:
    # For each endpoint page, fetch the HTML and then extract the curl commands
    page_html = ParsingPage.create_page(url)
    if page_html:
        curls = parser.extract(page_html, 'span.code', attr=None)  # extract the text, not an attribute
        data.append(curls)

lists_breeds = ParsingPage.fetch_content_list('https://dog.ceo/api/breeds/list/all')
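Per the Dog API documentation, /api/breeds/list/all responds with JSON shaped like {"message": {"<breed>": [<sub-breeds>]}, "status": "success"}, so the result can be inspected with, for example (a sketch, assuming the request succeeded):

if lists_breeds:
    print(list(lists_breeds["message"])[:5])  # first few breed names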
3 changes: 0 additions & 3 deletions main.py
@@ -1,3 +0,0 @@
print('Hello world!')


2 changes: 2 additions & 0 deletions requirements.txt
@@ -0,0 +1,2 @@
requests
beautifulsoup4
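With the two dependencies listed by name only, a typical setup (assuming the virtualenv workflow implied by the venv entry in .gitignore) would be:

python -m venv venv
source venv/bin/activate
pip install -r requirements.txt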