diff --git a/node/flatpak_node_generator/main.py b/node/flatpak_node_generator/main.py index f5e831c1..b6183453 100644 --- a/node/flatpak_node_generator/main.py +++ b/node/flatpak_node_generator/main.py @@ -1,11 +1,16 @@ from pathlib import Path -from typing import Iterator, List, Set +from typing import Any, ContextManager, Iterator, List, Optional, Set import argparse import asyncio +import contextlib import json import os import sys +import time + +from rich.console import Console, Group, RenderableType +from rich.live import Live from .cache import Cache, FilesystemBasedCache from .manifest import ManifestGenerator @@ -18,6 +23,8 @@ from .providers.yarn import YarnProviderFactory from .requests import Requests, StubRequests +_CONSOLE_REFRESH_PER_SECOND = 12.5 + def _scan_for_lockfiles(base: Path, patterns: List[str]) -> Iterator[Path]: for root, _, files in os.walk(base.parent): @@ -52,6 +59,11 @@ async def _async_main() -> None: action='append', help='Given -r, restrict files to those matching the given pattern.', ) + parser.add_argument( + '--no-live-progress', + action='store_true', + help='Disable live progress output', + ) parser.add_argument( '--registry', help='The registry to use (npm only)', @@ -135,13 +147,20 @@ async def _async_main() -> None: dest='xdg_layout', help="Don't use the XDG layout for caches", ) - # Internal option, useful for testing. + # Internal options, useful for testing. 
parser.add_argument('--stub-requests', action='store_true', help=argparse.SUPPRESS) + parser.add_argument( + '--traceback-on-interrupt', + action='store_true', + help=argparse.SUPPRESS, + ) args = parser.parse_args() Requests.retries = args.retries + console = Console() if not args.no_live_progress and sys.stdout.isatty() else None + if args.type == 'yarn' and (args.no_devel or args.no_autopatch): sys.exit('--no-devel and --no-autopatch do not apply to Yarn.') @@ -187,6 +206,8 @@ async def _async_main() -> None: else: assert False, args.type + start_time = time.monotonic() + print('Reading packages from lockfiles...') packages: Set[Package] = set() rcfile_node_headers: Set[NodeHeaders] = set() @@ -220,16 +241,45 @@ async def _async_main() -> None: ) special = SpecialSourceProvider(gen, options) - with provider_factory.create_module_provider(gen, special) as module_provider: - with GeneratorProgress( + live: ContextManager[Any] + if console is not None: + requests_renderable = Requests.instance.get_renderable(console) + generator_renderable: Optional[RenderableType] = None + + def get_renderable() -> RenderableType: + if generator_renderable is not None: + return Group( + requests_renderable, + generator_renderable, + ) + else: + return requests_renderable + + live = Live( + get_renderable=get_renderable, + refresh_per_second=_CONSOLE_REFRESH_PER_SECOND, + console=console, + ) + else: + live = contextlib.nullcontext() + + with live: + with provider_factory.create_module_provider( + gen, special + ) as module_provider, GeneratorProgress( packages, module_provider, - args.max_parallel, + max_parallel=args.max_parallel, + traceback_on_interrupt=args.traceback_on_interrupt, + ) as progress: + if console is not None: + generator_renderable = progress.get_renderable(console) + await progress.run() - for headers in rcfile_node_headers: - print(f'Generating headers {headers.runtime} @ {headers.target}') - await special.generate_node_headers(headers) + + for headers in 
rcfile_node_headers: + print(f'Generating headers {headers.runtime} @ {headers.target}...') + await special.generate_node_headers(headers) if args.xdg_layout: script_name = 'setup_sdk_node_headers.sh' @@ -246,6 +296,8 @@ async def _async_main() -> None: ) gen.add_command(f'bash {gen.data_root / script_name}') + elapsed = round(time.monotonic() - start_time, 1) + if args.split: i = 0 for i, part in enumerate(gen.split_sources()): @@ -254,7 +306,7 @@ async def _async_main() -> None: with open(output, 'w') as fp: json.dump(part, fp, indent=ManifestGenerator.JSON_INDENT) - print(f'Wrote {gen.source_count} to {i + 1} file(s).') + print(f'Wrote {gen.source_count} to {i + 1} file(s) in {elapsed} second(s).') else: with open(args.output, 'w') as fp: json.dump( @@ -270,7 +322,7 @@ async def _async_main() -> None: ) print(' (Pass -s to enable splitting.)') - print(f'Wrote {gen.source_count} source(s).') + print(f'Wrote {gen.source_count} source(s) in {elapsed} second(s).') def main() -> None: diff --git a/node/flatpak_node_generator/progress.py b/node/flatpak_node_generator/progress.py index 194df892..46acea77 100644 --- a/node/flatpak_node_generator/progress.py +++ b/node/flatpak_node_generator/progress.py @@ -1,27 +1,96 @@ -from typing import Collection, ContextManager, Optional, Type +from dataclasses import dataclass +from typing import Collection, ContextManager, Optional, Set, Type import asyncio import shutil import sys +import traceback import types +from rich.console import ( + Console, + ConsoleOptions, + ConsoleRenderable, + RenderableType, + RenderResult, +) +from rich.measure import Measurement +from rich.segment import Segment +from rich.status import Status + from .package import Package from .providers import ModuleProvider +def _generating_packages(finished: int, total: int) -> str: + return f'Generating packages [{finished}/{total}]' + + +class _GeneratingPackagesRenderable(ConsoleRenderable): + def __init__(self, finished: int, total: int, processing: 
Set[Package]) -> None: + self.generating_string = _generating_packages(finished, total) + self.processing = processing + + def __rich_measure__( + self, console: Console, options: ConsoleOptions + ) -> Measurement: + return Measurement(0, options.max_width) + + def __rich_console__( + self, console: Console, options: ConsoleOptions + ) -> RenderResult: + ARROW = ' => ' + ELLIPSES = '...' + SEPARATOR = ', ' + + yield Segment(self.generating_string) + space_remaining = options.max_width - len(self.generating_string) + + generating_string_width = len(self.generating_string) + if space_remaining < len(ELLIPSES): + return + elif options.max_width < len(ELLIPSES) + len(ARROW): + return ELLIPSES + + packages = sorted( + f'{package.name} @ {package.version}' for package in self.processing + ) + + yield Segment(ARROW) + space_remaining -= len(ARROW) + len(ELLIPSES) + + for i, package in enumerate(packages): + if i: + package = SEPARATOR + package + if len(package) > space_remaining: + break + + yield Segment(package) + space_remaining -= len(package) + + yield Segment(ELLIPSES) + + class GeneratorProgress(ContextManager['GeneratorProgress']): def __init__( self, packages: Collection[Package], module_provider: ModuleProvider, + *, max_parallel: int, + traceback_on_interrupt: bool, ) -> None: self.finished = 0 + self.processing: Set[Package] = set() self.packages = packages self.module_provider = module_provider self.parallel_limit = asyncio.Semaphore(max_parallel) - self.previous_package: Optional[Package] = None - self.current_package: Optional[Package] = None + self.traceback_on_interrupt = traceback_on_interrupt + self.status: Optional[Status] = None + + @property + def _total(self) -> int: + return len(self.packages) def __exit__( self, @@ -29,45 +98,56 @@ def __exit__( exc_value: Optional[BaseException], tb: Optional[types.TracebackType], ) -> None: - print() - - def _format_package(self, package: Package, max_width: int) -> str: - result = f'{package.name} @ 
{package.version}' - - if len(result) > max_width: - result = result[: max_width - 3] + '...' - - return result + line = f'Generated {self._total} package(s).' + if self.status is not None: + self.status.update(line) + self.status.stop() + else: + print(line) def _update(self) -> None: - columns, _ = shutil.get_terminal_size() - - sys.stdout.write('\r' + ' ' * columns) - - prefix_string = f'\rGenerating packages [{self.finished}/{len(self.packages)}] ' - sys.stdout.write(prefix_string) - max_package_width = columns - len(prefix_string) - - if self.current_package is not None: - sys.stdout.write( - self._format_package(self.current_package, max_package_width) - ) - - sys.stdout.flush() - - def _update_with_package(self, package: Package) -> None: - self.previous_package, self.current_package = ( - self.current_package, - package, + if self.status is None: + # No TTY. Only print an update on multiples of 10 to avoid spamming + # the console. + if self.finished % 10 == 0 or self.finished == self._total: + print( + f'{_generating_packages(self.finished, self._total)}...', + flush=True, + ) + return + + self.status.update( + _GeneratingPackagesRenderable(self.finished, self._total, self.processing) ) - self._update() async def _generate(self, package: Package) -> None: async with self.parallel_limit: - self._update_with_package(package) - await self.module_provider.generate_package(package) + self.processing.add(package) + # Don't bother printing an update here without live progress, since + # then the currently processing packages won't appear anyway. 
+ if self.status is not None: + self._update() + + try: + await self.module_provider.generate_package(package) + except asyncio.CancelledError: + if self.traceback_on_interrupt: + print(f'========== {package.name} ==========', file=sys.stderr) + traceback.print_exc() + print(file=sys.stderr) + raise + self.finished += 1 - self._update_with_package(package) + self.processing.remove(package) + self._update() + + def get_renderable(self, console: Console) -> RenderableType: + if self.status is not None: + assert self.status.console is console + else: + self.status = Status('', console=console) + + return self.status async def run(self) -> None: self._update() diff --git a/node/flatpak_node_generator/requests.py b/node/flatpak_node_generator/requests.py index f280ab96..e7c5d0b8 100644 --- a/node/flatpak_node_generator/requests.py +++ b/node/flatpak_node_generator/requests.py @@ -1,6 +1,11 @@ -from typing import AsyncIterator, ClassVar +from types import TracebackType +from typing import AsyncIterator, ClassVar, ContextManager, Optional, Tuple, Type import contextlib +import os + +from rich.console import Console, RenderableType +from rich.progress import Progress, TaskID import aiohttp @@ -9,20 +14,89 @@ DEFAULT_PART_SIZE = 4096 +def _format_bytes_as_mb(n: int) -> str: + return f'{n/1024/1024:.2f} MiB' + + +class _ResponseStream(ContextManager['_ResponseStream']): + _MINIMUM_SIZE_FOR_LIVE_PROGRESS = 1 * 1024 * 1024 + + def __init__( + self, + response: aiohttp.ClientResponse, + progress: Optional[Progress], + ) -> None: + self._response = response + + self._task: Optional[TaskID] = None + self._read = 0 + self._total = 0 + self._progress_task: Optional[Tuple[Progress, TaskID]] = None + if ( + progress is not None + and response.content_length is not None + and response.content_length > self._MINIMUM_SIZE_FOR_LIVE_PROGRESS + ): + self._total = response.content_length + task = progress.add_task('', total=self._total, start=False) + self._progress_task = (progress, task) 
+ self._update_progress() + progress.start_task(task) + + def __enter__(self) -> '_ResponseStream': + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + if self._progress_task is not None: + progress, task = self._progress_task + progress.remove_task(task) + + def _update_progress(self) -> None: + if self._progress_task is None: + return + + assert self._total + + progress, task = self._progress_task + mb_read = _format_bytes_as_mb(self._read) + mb_total = _format_bytes_as_mb(self._total) + progress.update( + task, + completed=self._read, + description=f'{os.path.basename(self._response.url.path)} [{mb_read}/{mb_total}]', + ) + + async def read(self, n: int = -1) -> bytes: + data = await self._response.content.read(n) + self._read += len(data) + self._update_progress() + + return data + + class Requests: instance: 'Requests' DEFAULT_RETRIES = 5 retries: ClassVar[int] = DEFAULT_RETRIES + def __init__(self) -> None: + self.progress: Optional[Progress] = None + def __get_cache_bucket(self, cachable: bool, url: str) -> Cache.BucketRef: return Cache.get_working_instance_if(cachable).get(f'requests:{url}') @contextlib.asynccontextmanager - async def _open_stream(self, url: str) -> AsyncIterator[aiohttp.StreamReader]: + async def _open_stream(self, url: str) -> AsyncIterator[_ResponseStream]: async with aiohttp.ClientSession(raise_for_status=True) as session: async with session.get(url) as response: - yield response.content + with _ResponseStream(response, self.progress) as stream: + yield stream async def _read_parts( self, url: str, size: int = DEFAULT_PART_SIZE @@ -82,6 +156,14 @@ async def read_all(self, url: str, *, cachable: bool = False) -> bytes: assert False + def get_renderable(self, console: Console) -> RenderableType: + if self.progress is not None: + assert self.progress.console is console + else: + self.progress = Progress(console=console) 
+ + return self.progress + class StubRequests(Requests): async def _read_parts( diff --git a/node/poetry.lock b/node/poetry.lock index 9e49bc53..8b499939 100644 --- a/node/poetry.lock +++ b/node/poetry.lock @@ -104,7 +104,7 @@ flake8 = ">=3.8,<5.0.0" type = "git" url = "https://github.com/grantjenks/blue" reference = "HEAD" -resolved_reference = "d818dd6417fb1985dba31be9aa16e85f0d814801" +resolved_reference = "0e9f225963754cbc29449d9d220ac89b1026b0bc" [[package]] name = "charset-normalizer" @@ -137,6 +137,17 @@ category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +[[package]] +name = "commonmark" +version = "0.9.1" +description = "Python parser for the CommonMark Markdown spec" +category = "main" +optional = false +python-versions = "*" + +[package.extras] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] + [[package]] name = "execnet" version = "1.9.0" @@ -360,6 +371,17 @@ category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +[[package]] +name = "Pygments" +version = "2.13.0" +description = "Pygments is a syntax highlighting package written in Python." 
+category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +plugins = ["importlib-metadata"] + [[package]] name = "pyparsing" version = "3.0.9" @@ -459,6 +481,22 @@ psutil = ["psutil (>=3.0)"] setproctitle = ["setproctitle"] testing = ["filelock"] +[[package]] +name = "rich" +version = "12.6.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +category = "main" +optional = false +python-versions = ">=3.6.3,<4.0.0" + +[package.dependencies] +commonmark = ">=0.9.0,<0.10.0" +pygments = ">=2.6.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] + [[package]] name = "tomli" version = "2.0.1" @@ -525,7 +563,7 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>= [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "179bc4857ff39cc5e6eeb8e367ce5f42688bc0d884e07717bfed13b24bbc42a5" +content-hash = "5e1ec7ea533641ba6417a8de7b4267b68ec12d0b9642edd3689f102f8f3d0aaa" [metadata.files] aiohttp = [ @@ -671,6 +709,10 @@ colorama = [ {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, ] +commonmark = [ + {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, + {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +] execnet = [ {file = "execnet-1.9.0-py2.py3-none-any.whl", hash = "sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"}, {file = "execnet-1.9.0.tar.gz", hash = "sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5"}, @@ -928,6 +970,10 @@ pyflakes = [ {file = 
"pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"}, {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"}, ] +Pygments = [ + {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, + {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"}, +] pyparsing = [ {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, @@ -957,6 +1003,10 @@ pytest-xdist = [ {file = "pytest-xdist-2.5.0.tar.gz", hash = "sha256:4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf"}, {file = "pytest_xdist-2.5.0-py3-none-any.whl", hash = "sha256:6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65"}, ] +rich = [ + {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, + {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, +] tomli = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, diff --git a/node/pyproject.toml b/node/pyproject.toml index 0d185663..3371fd11 100644 --- a/node/pyproject.toml +++ b/node/pyproject.toml @@ -7,6 +7,7 @@ authors = ["Ryan Gonzalez ", "Filippe LeMarchand