Skip to content

Commit

Permalink
Transport metrics to track individual requests (#289)
Browse files Browse the repository at this point in the history
* Transport metrics to track individual requests

* Add to CHANGELOG
  • Loading branch information
Pliner authored Jan 9, 2025
1 parent 780b6f7 commit c2321a0
Show file tree
Hide file tree
Showing 3 changed files with 67 additions and 2 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

* [Increase metrics buckets precision](https://github.com/anna-money/aio-request/pull/287)
* [Expose methods to build requests](https://github.com/anna-money/aio-request/pull/288)
* [Transport metrics to track individual requests](https://github.com/anna-money/aio-request/pull/289)


## v0.2.0 (2025-01-09)
Expand Down
2 changes: 1 addition & 1 deletion aio_request/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

latency_histogram = prom.Histogram(
"aio_request_latency",
"Duration of HTTP client requests.",
"Duration of client requests.",
labelnames=(
"request_endpoint",
"request_method",
Expand Down
66 changes: 65 additions & 1 deletion aio_request/pipeline.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import abc
import asyncio
import collections.abc
import time

import multidict
import yarl
Expand All @@ -13,6 +14,62 @@
from .response_classifier import ResponseClassifier, ResponseVerdict
from .transport import Transport


try:
    import prometheus_client as prom

    # Transport-level latency histogram: one sample per individual attempt,
    # as opposed to the client-level histogram that covers the whole request.
    latency_histogram = prom.Histogram(
        "aio_request_transport_latency",
        "Duration of transport requests.",
        labelnames=(
            "request_endpoint",
            "request_method",
            "request_path",
            "response_status",
        ),
        buckets=(
            0.005, 0.01, 0.025, 0.05, 0.075,
            0.1, 0.15, 0.2, 0.25, 0.3,
            0.35, 0.4, 0.45, 0.5, 0.75,
            1.0, 5.0, 10.0, 15.0, 20.0,
        ),
    )

    def capture_metrics(
        *, endpoint: yarl.URL, request: Request, status: int, started_at: float
    ) -> None:
        """Record one latency sample for a single transport-level request.

        ``started_at`` is a ``time.perf_counter()`` timestamp taken just before
        the transport call; the elapsed time is clamped at zero to guard
        against clock anomalies.
        """
        duration = time.perf_counter() - started_at
        latency_histogram.labels(
            endpoint.human_repr(),
            request.method,
            request.url.path,
            str(status),
        ).observe(duration if duration > 0.0 else 0.0)

except ImportError:

    def capture_metrics(
        *, endpoint: yarl.URL, request: Request, status: int, started_at: float
    ) -> None:
        """No-op fallback used when prometheus_client is not installed."""

# Callable signature of the next stage in the request pipeline: it receives
# the endpoint, the request, a deadline and a priority, and awaits to a
# closable response.
NextModuleFunc = collections.abc.Callable[
    [yarl.URL, Request, Deadline, Priority],
    collections.abc.Awaitable[ClosableResponse],
]
Expand Down Expand Up @@ -113,7 +170,14 @@ async def execute(
enriched_request = await enriched_request
request = enriched_request # type: ignore

return await self.__transport.send(endpoint, request, deadline.timeout)
started_at = time.perf_counter()
try:
response = await self.__transport.send(endpoint, request, deadline.timeout)
capture_metrics(endpoint=endpoint, request=request, status=response.status, started_at=started_at)
return response
except asyncio.CancelledError:
capture_metrics(endpoint=endpoint, request=request, status=499, started_at=started_at)
raise


class CircuitBreakerModule(RequestModule):
Expand Down

0 comments on commit c2321a0

Please sign in to comment.