diff --git a/.devcontainer/conda-forge/devcontainer.json b/.devcontainer/conda-forge/devcontainer.json
index 6e175228..d8493aed 100644
--- a/.devcontainer/conda-forge/devcontainer.json
+++ b/.devcontainer/conda-forge/devcontainer.json
@@ -3,6 +3,9 @@
     "name": "Miniforge (default-channel=conda-forge)",
     "image": "condaforge/miniforge3:latest",
 
+    // Uncomment to force x64 instead of native (slower!)
+    // "runArgs": ["--platform=linux/amd64"],
+
     // Features to add to the dev container. More info: https://containers.dev/features.
     // "features": {},
 
@@ -31,7 +34,8 @@
             "extensions": [
                 "charliermarsh.ruff",
                 "eamodio.gitlens",
-                "ms-toolsai.jupyter"
+                "ms-toolsai.jupyter",
+                "be5invis.toml"
             ]
         }
     }
diff --git a/.devcontainer/defaults/devcontainer.json b/.devcontainer/defaults/devcontainer.json
index 26ec392d..4c3a9383 100644
--- a/.devcontainer/defaults/devcontainer.json
+++ b/.devcontainer/defaults/devcontainer.json
@@ -3,6 +3,9 @@
     "name": "Miniconda (default-channel=defaults)",
     "image": "continuumio/miniconda3:latest",
 
+    // Uncomment to force x64 instead of native (slower!)
+    // "runArgs": ["--platform=linux/amd64"],
+
     // Features to add to the dev container. More info: https://containers.dev/features.
     // "features": {},
 
@@ -31,7 +34,8 @@
             "extensions": [
                 "charliermarsh.ruff",
                 "eamodio.gitlens",
-                "ms-toolsai.jupyter"
+                "ms-toolsai.jupyter",
+                "be5invis.toml"
             ]
         }
     }
diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh
index 33fe3079..b90a1d79 100644
--- a/.devcontainer/post_create.sh
+++ b/.devcontainer/post_create.sh
@@ -38,4 +38,5 @@ echo "Installing dev & test dependencies..."
     --file="$SRC_CONDA/tests/requirements-Linux.txt" \
     --file="$SRC_CONDA/tests/requirements-s3.txt" \
     --file="$SRC_CONDA_LIBMAMBA_SOLVER/dev/requirements.txt" \
-    --file="$SRC_CONDA_LIBMAMBA_SOLVER/tests/requirements.txt"
+    --file="$SRC_CONDA_LIBMAMBA_SOLVER/tests/requirements.txt" \
+    pre-commit
diff --git a/.devcontainer/post_start.sh b/.devcontainer/post_start.sh
index 09ac0838..b8c1d689 100644
--- a/.devcontainer/post_start.sh
+++ b/.devcontainer/post_start.sh
@@ -23,19 +23,30 @@ function develop-mamba() (
         echo "Miniconda not compatible with develop-mamba"
         exit 1
     fi
-    if [ ! -f "$SRC_MAMBA/mamba/setup.py" ]; then
-        echo "Mamba 1.x not found at $SRC_MAMBA"
+    if [ ! -f "$SRC_MAMBA/libmamba/CMakeLists.txt" ]; then
+        echo "Could not find mamba-org/mamba at $SRC_MAMBA"
         exit 1
     fi
+    if [ -f "$SRC_MAMBA/mamba/setup.py" ]; then
+        echo "Mamba 1.x found at $SRC_MAMBA"
+        mamba_version=1
+        environment_yaml="$SRC_MAMBA/mamba/environment-dev.yml"
+    else
+        echo "Mamba 2.x found at $SRC_MAMBA"
+        environment_yaml="$SRC_MAMBA/dev/environment-dev.yml"
+        mamba_version=2
+    fi
     # Install mamba dev dependencies only once:
    if [ ! -f ~/.mamba-develop-installed ]; then
        # remove "sel(win)" in environment yaml hack since conda does not understand
        # libmamba specific specs
-        echo "Installing mamba 1.x in dev mode..."
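# [editor's note] To illustrate the "sel()" hack below: mamba's environment-dev.yml
# may contain libmamba-style platform selectors that conda's YAML parser rejects,
# e.g. (hypothetical excerpt):
#
#     - sel(win): vs2017_win-64
#
# The sed expression that follows simply drops any such line before handing the
# file to conda:
#
#     sed '/sel(.*)/d' environment-dev.yml > /tmp/mamba-environment-dev.yml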
- sed '/sel(.*)/d' "$SRC_MAMBA/mamba/environment-dev.yml" > /tmp/mamba-environment-dev.yml - CONDA_QUIET=1 "$BASE_CONDA/condabin/conda" env update -p "$BASE_CONDA" \ - --file /tmp/mamba-environment-dev.yml - "$BASE_CONDA/condabin/conda" install make -yq # missing in mamba's dev env + sed '/sel(.*)/d' "\$environment_yaml" > /tmp/mamba-environment-dev.yml + # Environment.yml is missing make + echo " - make" >> /tmp/mamba-environment-dev.yml + "$BASE_CONDA/condabin/conda" env update \ + --quiet \ + --prefix "$BASE_CONDA" \ + --file /tmp/mamba-environment-dev.yml # Clean build directory to avoid issues with stale build files test -f "$SRC_MAMBA/build/CMakeCache.txt" && rm -rf "$SRC_MAMBA/build" fi @@ -54,7 +65,11 @@ function develop-mamba() ( make install -C build/ cd - "$BASE_CONDA/bin/pip" install -e "$SRC_MAMBA/libmambapy/" --no-deps - test -f "$BASE_CONDA/conda-meta/mamba-"*".json" && "$BASE_CONDA/bin/pip" install -e "$SRC_MAMBA/mamba/" --no-deps + if [ "\$mamba_version" == "1" ]; then + test -f "$BASE_CONDA/conda-meta/mamba-"*".json" && "$BASE_CONDA/bin/pip" install -e "$SRC_MAMBA/mamba/" --no-deps + else + echo "Mamba binary installation not supported yet" + fi touch ~/.mamba-develop-installed || true ) EOF diff --git a/.github/workflows/builds-review.yaml b/.github/workflows/builds-review.yaml index a147dab5..5441e9c9 100644 --- a/.github/workflows/builds-review.yaml +++ b/.github/workflows/builds-review.yaml @@ -52,7 +52,7 @@ jobs: subdir: ${{ matrix.subdir }} anaconda-org-channel: conda-canary anaconda-org-label: '${{ github.event.repository.name }}-pr-${{ github.event.number }}' - anaconda-org-token: ${{ secrets.CONDA_CANARY_ANACONDA_ORG_TOKEN }} + anaconda-org-token: ${{ secrets.ANACONDA_ORG_CONDA_CANARY_TOKEN }} comment-headline: 'Review build status' comment-token: ${{ secrets.CANARY_ACTION_TOKEN }} - conda-build-arguments: '--override-channels -c conda-forge -c defaults' + conda-build-arguments: '--override-channels -c conda-forge/label/mamba_dev -c conda-forge' diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 04606276..40ed83d6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -82,13 +82,13 @@ jobs: fail-fast: false matrix: # test lower version (w/ defaults) and upper version (w/ defaults and conda-forge) - python-version: ['3.8', '3.11', '3.12'] # CONDA-LIBMAMBA-SOLVER CHANGE + python-version: ['3.9', '3.11', '3.12'] # CONDA-LIBMAMBA-SOLVER CHANGE default-channel: [defaults, conda-forge] test-type: [conda-libmamba-solver, unit, integration] # CONDA-LIBMAMBA-SOLVER CHANGE test-group: [1, 2, 3] exclude: - default-channel: conda-forge - python-version: '3.8' + python-version: '3.9' - default-channel: defaults # CONDA-LIBMAMBA-SOLVER CHANGE python-version: '3.11' # CONDA-LIBMAMBA-SOLVER CHANGE - default-channel: conda-forge # CONDA-LIBMAMBA-SOLVER CHANGE @@ -223,13 +223,11 @@ jobs: fail-fast: false matrix: # test all lower versions (w/ defaults) and upper version (w/ defaults and conda-forge) - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] default-channel: [defaults, conda-forge] test-type: [conda-libmamba-solver, unit, integration] # CONDA-LIBMAMBA-SOLVER CHANGE test-group: [1, 2, 3] exclude: - - python-version: '3.8' - default-channel: conda-forge - python-version: '3.9' default-channel: conda-forge - python-version: '3.10' @@ -283,7 +281,8 @@ jobs: - name: Conda Install working-directory: conda - run: conda install + run: > + conda install --yes --file 
tests/requirements.txt --file tests/requirements-${{ runner.os }}.txt @@ -496,19 +495,19 @@ jobs: matrix: # test lower version (w/ osx-64 & defaults & unit tests) and upper version (w/ osx-arm64 & conda-forge & integration tests) arch: [osx-64, osx-arm64] - python-version: ['3.8', '3.11'] + python-version: ['3.9', '3.12'] default-channel: [defaults, conda-forge] test-type: [conda-libmamba-solver, unit, integration] # CONDA-LIBMAMBA-SOLVER CHANGE test-group: [1, 2, 3] exclude: - arch: osx-64 - python-version: '3.11' + python-version: '3.12' - arch: osx-64 default-channel: conda-forge - arch: osx-64 test-type: integration - arch: osx-arm64 - python-version: '3.8' + python-version: '3.9' - arch: osx-arm64 default-channel: defaults - arch: osx-arm64 @@ -565,7 +564,8 @@ jobs: - name: Conda Install working-directory: conda # CONDA-LIBMAMBA-SOLVER CHANGE # CONDA-LIBMAMBA-SOLVER CHANGE: add conda-libmamba-solver requirements.txt - run: conda install + run: > + conda install --yes --file tests/requirements.txt --file tests/requirements-ci.txt diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 35ee5fb1..9d273236 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,10 +4,11 @@ ci: # generally speaking we ignore all vendored code as well as tests data exclude: | (?x)^( - tests/data/ | + tests/data/.* | conda_libmamba_solver/mamba_utils\.py - )/ + )$ repos: + # generic verification and formatting - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: @@ -18,46 +19,45 @@ repos: # ensure syntaxes are valid - id: check-toml - id: check-yaml - exclude: ^(recipe/meta.yaml|tests/data/) + exclude: | + (?x)^( + (conda\.)?recipe/meta.yaml + ) # catch git merge/rebase problems - id: check-merge-conflict - - repo: https://github.com/asottile/pyupgrade - rev: v3.18.0 - hooks: - - id: pyupgrade - args: ["--py38-plus"] - exclude: ^conda/exports.py - - repo: https://github.com/psf/black - rev: 24.10.0 - hooks: - - id: black - exclude: tests/_reposerver\.py - - repo: https://github.com/pycqa/isort - rev: 5.13.2 - hooks: - - id: isort - exclude: tests/_reposerver\.py - repo: https://github.com/asottile/blacken-docs rev: 1.19.0 hooks: + # auto format Python codes within docstrings - id: blacken-docs additional_dependencies: [black] - - repo: https://github.com/PyCQA/flake8 - rev: 7.1.1 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.6.9 hooks: - - id: flake8 - - repo: https://github.com/PyCQA/pylint - rev: v3.3.1 + # lint & attempt to correct failures (e.g. 
pyupgrade) + - id: ruff + args: [--fix] + # compatible replacement for black + - id: ruff-format + - repo: meta + # see https://pre-commit.com/#meta-hooks hooks: - - id: pylint - args: [--exit-zero] + - id: check-hooks-apply + - id: check-useless-excludes - repo: https://github.com/PyCQA/bandit rev: 1.7.10 hooks: - id: bandit args: [--exit-zero] - # ignore all tests, not just tests data - exclude: ^tests/ + exclude: ^(tests/) + - repo: local + hooks: + - id: git-diff + name: git diff + entry: git diff --exit-code + language: system + pass_filenames: false + always_run: true - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 hooks: diff --git a/conda_libmamba_solver/__init__.py b/conda_libmamba_solver/__init__.py index 6f6a4944..d02862b9 100644 --- a/conda_libmamba_solver/__init__.py +++ b/conda_libmamba_solver/__init__.py @@ -12,20 +12,10 @@ except ImportError: __version__ = "0.0.0.unknown" -from warnings import warn as _warn -from .solver import LibMambaSolver - - -def get_solver_class(key="libmamba"): +def get_solver_class(key: str = "libmamba"): if key == "libmamba": - return LibMambaSolver - if key == "libmamba-draft": - _warn( - "The 'libmamba-draft' solver has been deprecated. " - "The 'libmamba' solver will be used instead. " - "Please consider updating your code to remove this warning. " - "Using 'libmamba-draft' will result in an error in a future release.", - ) + from .solver import LibMambaSolver + return LibMambaSolver raise ValueError("Key must be 'libmamba'") diff --git a/conda_libmamba_solver/conda_build_exceptions.py b/conda_libmamba_solver/conda_build_exceptions.py index 64088f72..ebfcf3d7 100644 --- a/conda_libmamba_solver/conda_build_exceptions.py +++ b/conda_libmamba_solver/conda_build_exceptions.py @@ -8,6 +8,7 @@ conda_build is not a dependency, but we only import this when conda-build is calling the solver, so it's fine to import it here. """ + from conda_build.exceptions import DependencyNeedsBuildingError diff --git a/conda_libmamba_solver/exceptions.py b/conda_libmamba_solver/exceptions.py index 9da0dc9c..71e81cd0 100644 --- a/conda_libmamba_solver/exceptions.py +++ b/conda_libmamba_solver/exceptions.py @@ -1,6 +1,10 @@ # Copyright (C) 2022 Anaconda, Inc # Copyright (C) 2023 conda # SPDX-License-Identifier: BSD-3-Clause +""" +Exceptions used in conda-libmamba-solver +""" + from conda.exceptions import UnsatisfiableError diff --git a/conda_libmamba_solver/index.py b/conda_libmamba_solver/index.py index ce00ef99..4961fd9f 100644 --- a/conda_libmamba_solver/index.py +++ b/conda_libmamba_solver/index.py @@ -70,169 +70,279 @@ We maintain a map of subdir-specific URLs to `conda.model.channel.Channel` and `libmamba.Repo` objects. 
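
For illustration, one entry of that mapping might look like this (a sketch with
a made-up URL, not literal output; the dataclass is defined right below)::

    _ChannelRepoInfo(
        channel=Channel("conda-forge"),
        repo=<libmamba RepoInfo for conda-forge/linux-64>,
        url_w_cred="https://user:token@conda.anaconda.org/conda-forge/linux-64",
        url_no_cred="https://conda.anaconda.org/conda-forge/linux-64",
    )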
""" + from __future__ import annotations import logging import os from dataclasses import dataclass -from functools import lru_cache, partial +from functools import partial from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import Iterable - -import libmambapy as api -from conda import __version__ as conda_version -from conda.base.constants import REPODATA_FN -from conda.base.context import context, reset_context -from conda.common.io import DummyExecutor, ThreadLimitedThreadPoolExecutor, env_var -from conda.common.serialize import json_dump, json_load -from conda.common.url import percent_decode, remove_auth, split_anaconda_token +from typing import TYPE_CHECKING + +from conda.base.constants import KNOWN_SUBDIRS, REPODATA_FN, ChannelPriority +from conda.base.context import context +from conda.common.compat import on_win +from conda.common.io import DummyExecutor, ThreadLimitedThreadPoolExecutor +from conda.common.url import path_to_url, remove_auth, split_anaconda_token from conda.core.package_cache_data import PackageCacheData from conda.core.subdir_data import SubdirData from conda.models.channel import Channel from conda.models.match_spec import MatchSpec from conda.models.records import PackageRecord -from conda.models.version import VersionOrder +from libmambapy import MambaNativeException, Query +from libmambapy.solver.libsolv import ( + Database, + PackageTypes, + PipAsPythonDependency, + Priorities, + RepodataOrigin, +) +from libmambapy.specs import ( + Channel as LibmambaChannel, +) +from libmambapy.specs import ( + ChannelResolveParams, + CondaURL, + NoArchType, + PackageInfo, +) + +from .mamba_utils import logger_callback + +if TYPE_CHECKING: + from collections.abc import Iterable + from typing import Literal + + from conda.gateways.repodata import RepodataState + from libmambapy import QueryResult + from libmambapy.solver.libsolv import RepoInfo -from .mamba_utils import set_channel_priorities -from .state import IndexHelper -from .utils import escape_channel_url log = logging.getLogger(f"conda.{__name__}") -@dataclass(frozen=True) +@dataclass class _ChannelRepoInfo: "A dataclass mapping conda Channels, libmamba Repos and URLs" - channel: Channel - repo: api.Repo - full_url: str - noauth_url: str + channel: Channel | None + repo: RepoInfo + url_w_cred: str + url_no_cred: str + + @property + def canonical_name(self): + if self.channel: + return self.channel.canonical_name + url_parts = self.url_no_cred.split("/") + if url_parts[-1] in KNOWN_SUBDIRS: + return url_parts[-2] + return url_parts[-1] + + +class LibMambaIndexHelper: + """ + Interface between conda and libmamba for the purpose of building the "index". + The index is the collection of package records that can be part of a solution. + It is built by collecting all the repodata.json files from channels and their + subdirs. For existing environments, the installed packages are also added to + the index (this helps with simplifying solutions and outputs). The local cache + can also be added as a "channel", which is useful in offline mode or with no + channels configured. 
+ """ -class LibMambaIndexHelper(IndexHelper): def __init__( self, - installed_records: Iterable[PackageRecord] = (), - channels: Iterable[Channel | str] | None = None, - subdirs: Iterable[str] | None = None, + channels: Iterable[Channel], + subdirs: Iterable[str] = (), repodata_fn: str = REPODATA_FN, - query_format=api.QueryFormat.JSON, - load_pkgs_cache: bool = False, + installed_records: Iterable[PackageRecord] = (), + pkgs_dirs: Iterable[os.PathLike] = (), ): - self._channels = context.channels if channels is None else channels - self._subdirs = context.subdirs if subdirs is None else subdirs - self._repodata_fn = repodata_fn - - self._repos = [] - self._pool = api.Pool() - - installed_repo = self._load_installed(installed_records) - self._repos.append(installed_repo) - - if load_pkgs_cache: - self._repos.extend(self._load_pkgs_cache()) - - self._index = self._load_channels() - self._repos += [info.repo for info in self._index.values()] - - self._query = api.Query(self._pool) - self._format = query_format - - def get_info(self, key: str) -> _ChannelRepoInfo: - orig_key = key - if not key.startswith("file://"): - # The conda functions (specifically remove_auth) assume the input - # is a url; a file uri on windows with a drive letter messes them up. - # For the rest, we remove all forms of authentication - key = split_anaconda_token(remove_auth(key))[0] - try: - return self._index[key] - except KeyError as exc: - # some libmamba versions return encoded URLs - try: - return self._index[percent_decode(key)] - except KeyError: - pass # raise original error below - raise KeyError( - f"Channel info for {orig_key} ({key}) not found. " - f"Available keys: {list(self._index)}" - ) from exc - - def reload_local_channels(self): - """ - Reload a channel that was previously loaded from a local directory. - """ - for noauth_url, info in self._index.items(): - if noauth_url.startswith("file://") or info.channel.scheme == "file": - url, json_path = self._fetch_channel(info.full_url) - repo_position = self._repos.index(info.repo) - info.repo.clear(True) - new = self._json_path_to_repo_info(url, json_path, try_solv=False) - self._repos[repo_position] = new.repo - self._index[noauth_url] = new - set_channel_priorities(self._index) - - def _repo_from_records( - self, pool: api.Pool, repo_name: str, records: Iterable[PackageRecord] = () - ) -> api.Repo: - """ - Build a libmamba 'Repo' object from conda 'PackageRecord' objects. - - This is done by rebuilding a repodata.json-like dictionary, which is - then exported to a temporary file that will be loaded with 'libmambapy.Repo'. - """ - exported = {"packages": {}, "packages.conda": {}} - additional_infos = {} - for record in records: - record_data = dict(record.dump()) - # These fields are expected by libmamba, but they don't always appear - # in the record.dump() dict (e.g. 
exporting from S3 channels) - # ref: https://github.com/mamba-org/mamba/blob/ad46f318b/libmamba/src/core/package_info.cpp#L276-L318 # noqa - for field in ( - "sha256", - "track_features", - "license", - "size", - "url", - "noarch", - "platform", - "timestamp", - ): - if field in record_data: - continue # do not overwrite - value = getattr(record, field, None) - if value is not None: - if field == "timestamp" and value: - value = int(value * 1000) # from s to ms - record_data[field] = value - if record.fn.endswith(".conda"): - exported["packages.conda"][record.fn] = record_data + platform_less_channels = [] + for channel in channels: + if channel.platform: + # When .platform is defined, .urls() will ignore subdirs kw. Remove! + log.info( + "Platform-aware channels are not supported. " + "Ignoring platform %s from channel %s. " + "Use subdirs keyword if necessary.", + channel.platform, + channel, + ) + channel = Channel(**{k: v for k, v in channel.dump().items() if k != "platform"}) + platform_less_channels.append(channel) + self.channels = platform_less_channels + self.subdirs = subdirs or context.subdirs + self.repodata_fn = repodata_fn + self.db = self._init_db() + self.repos: list[_ChannelRepoInfo] = self._load_channels() + if pkgs_dirs: + self.repos.extend(self._load_pkgs_cache(pkgs_dirs)) + if installed_records: + self.repos.append(self._load_installed(installed_records)) + self._set_repo_priorities() + + @classmethod + def from_platform_aware_channel(cls, channel: Channel): + if not channel.platform: + raise ValueError(f"Channel {channel} must define 'platform' attribute.") + subdir = channel.platform + channel = Channel(**{k: v for k, v in channel.dump().items() if k != "platform"}) + return cls(channels=(channel,), subdirs=(subdir,)) + + def n_packages( + self, + repos: Iterable[RepoInfo] | None = None, + filter_: callable | None = None, + ) -> int: + repos = repos or [repo_info.repo for repo_info in self.repos] + count = 0 + for repo in repos: + if filter_: + for pkg in self.db.packages_in_repo(repo): + if filter_(pkg): + count += 1 else: - exported["packages"][record.fn] = record_data + count += len(self.db.packages_in_repo(repo)) + return count + + def reload_channel(self, channel: Channel): + urls = {} + for url in channel.urls(with_credentials=False, subdirs=self.subdirs): + for repo_info in self.repos: + if repo_info.url_no_cred == url: + log.debug("Reloading repo %s", repo_info.url_no_cred) + urls[repo_info.url_w_cred] = channel + self.db.remove_repo(repo_info.repo) + for new_repo_info in self._load_channels(urls, try_solv=False): + for repo_info in self.repos: + if repo_info.url_no_cred == new_repo_info.url_no_cred: + repo_info.repo = new_repo_info.repo + self._set_repo_priorities() + + def _init_db(self) -> Database: + custom_channels = { + # Add custom channels as a workaround for this weird conda behavior + # See https://github.com/conda/conda/issues/13501 + **{c.name: c for c in self.channels if c.location != context.channel_alias.location}, + **context.custom_channels, + } + custom_channels = { + name: LibmambaChannel( + url=CondaURL.parse(channel.base_url), + display_name=name, + platforms=set(self.subdirs), + ) + for (name, channel) in custom_channels.items() + if channel.base_url + } + custom_multichannels = { + channel_name: [ + custom_channels.get( + channel.name, + LibmambaChannel( + url=CondaURL.parse(channel.base_url), + display_name=channel.name, + platforms=set(self.subdirs), + ), + ) + for channel in channels + if channel.base_url + ] + for channel_name, 
channels in context.custom_multichannels.items() + } + params = ChannelResolveParams( + platforms=set(self.subdirs), + channel_alias=CondaURL.parse(str(context.channel_alias)), + custom_channels=ChannelResolveParams.ChannelMap(custom_channels), + custom_multichannels=ChannelResolveParams.MultiChannelMap(custom_multichannels), + home_dir=str(Path.home()), + current_working_dir=os.getcwd(), + ) + db = Database(params) + db.set_logger(logger_callback) + return db - # extra info for libmamba - info = api.ExtraPkgInfo() - if record.noarch: - info.noarch = record.noarch.value - if record.channel and record.channel.subdir_url: - info.repo_url = record.channel.subdir_url - additional_infos[record.fn] = info + def _load_channels( + self, + urls_to_channel: dict[str, Channel] | None = None, + try_solv: bool = True, + ) -> list[_ChannelRepoInfo]: + if urls_to_channel is None: + urls_to_channel = self._channel_urls() + urls_to_json_path_and_state = self._fetch_repodata_jsons(tuple(urls_to_channel.keys())) + channel_repo_infos = [] + for url_w_cred, (json_path, state) in urls_to_json_path_and_state.items(): + url_no_token, _ = split_anaconda_token(url_w_cred) + url_no_cred = remove_auth(url_no_token) + repo = self._load_repo_info_from_json_path( + json_path, + url_no_cred, + state, + try_solv=try_solv, + ) + channel_repo_infos.append( + _ChannelRepoInfo( + channel=urls_to_channel[url_w_cred], + repo=repo, + url_w_cred=url_w_cred, + url_no_cred=url_no_cred, + ) + ) + return channel_repo_infos - with NamedTemporaryFile(suffix=".json", delete=False, mode="w") as f: - f.write(json_dump(exported)) + def _channel_urls(self) -> dict[str, Channel]: + "Maps authenticated URLs to channel objects" + urls = {} + seen_noauth = set() + channels_with_subdirs = [] + for channel in self.channels: + for url in channel.urls(with_credentials=True, subdirs=self.subdirs): + channels_with_subdirs.append(Channel(url)) + for channel in channels_with_subdirs: + noauth_urls = [ + url + for url in channel.urls(with_credentials=False) + if url.endswith(tuple(self.subdirs)) + ] + if seen_noauth.issuperset(noauth_urls): + continue + auth_urls = [ + url + for url in channel.urls(with_credentials=True) + if url.endswith(tuple(self.subdirs)) + ] + if noauth_urls != auth_urls: # authed channel always takes precedence + urls.update({url: channel for url in auth_urls}) + seen_noauth.update(noauth_urls) + continue + # at this point, we are handling an unauthed channel; in some edge cases, + # an auth'd variant of the same channel might already be present in `urls`. + # we only add them if we haven't seen them yet + for url in noauth_urls: + if url not in seen_noauth: + urls[url] = channel + seen_noauth.add(url) + return urls - try: - repo = api.Repo(pool, repo_name, f.name, "") - repo.add_extra_pkg_info(additional_infos) - return repo - finally: - os.unlink(f.name) + def _fetch_repodata_jsons(self, urls: dict[str, str]) -> dict[str, tuple[str, RepodataState]]: + Executor = ( + DummyExecutor + if context.debug or context.repodata_threads == 1 + else partial(ThreadLimitedThreadPoolExecutor, max_workers=context.repodata_threads) + ) + with Executor() as executor: + return { + url: (str(path), state) + for (url, path, state) in executor.map(self._fetch_one_repodata_json, urls) + } - def _fetch_channel(self, url: str) -> tuple[str, os.PathLike]: + def _fetch_one_repodata_json(self, url: str) -> tuple[str, os.PathLike, RepodataState]: channel = Channel.from_url(url) if not channel.subdir: - raise ValueError(f"Channel URLs must specify a subdir! 
Provided: {url}")
 
         if "PYTEST_CURRENT_TEST" in os.environ:
             # Workaround some testing issues - TODO: REMOVE
@@ -240,151 +350,222 @@ def _fetch_channel(self, url: str) -> tuple[str, os.PathLike]:
             for key, cached in list(SubdirData._cache_.items()):
                 if not isinstance(key, tuple):
                     continue  # should not happen, but avoid IndexError just in case
-                if key[:2] == (url, self._repodata_fn) and cached._mtime == float("inf"):
+                if key[:2] == (url, self.repodata_fn) and cached._mtime == float("inf"):
                     del SubdirData._cache_[key]
             # /Workaround
-        log.debug("Fetching %s with SubdirData.repo_fetch", channel)
-        subdir_data = SubdirData(channel, repodata_fn=self._repodata_fn)
+        subdir_data = SubdirData(channel, repodata_fn=self.repodata_fn)
         if context.offline or context.use_index_cache:
             # This might not exist (yet, anymore), but that's ok because we'll check
             # for existence later and safely ignore if needed
             json_path = subdir_data.cache_path_json
+            state = None
         else:
-            json_path, _ = subdir_data.repo_fetch.fetch_latest_path()
-
-        return url, json_path
-
-    def _json_path_to_repo_info(
-        self, url: str, json_path: str, try_solv: bool = True
-    ) -> _ChannelRepoInfo | None:
-        channel = Channel.from_url(url)
-        noauth_url = channel.urls(with_credentials=False, subdirs=(channel.subdir,))[0]
+            # TODO: This method reads the whole JSON file (it does not parse it)
+            json_path, state = subdir_data.repo_fetch.fetch_latest_path()
+        return url, json_path, state
+
+    def _load_repo_info_from_json_path(
+        self, json_path: str, channel_url: str, state: RepodataState, try_solv: bool = True
+    ) -> RepoInfo | None:
+        if try_solv and on_win:
+            # .solv loading is so slow on Windows that it is not even worth it. Use JSON instead.
+            # https://github.com/mamba-org/mamba/pull/2753#issuecomment-1739122830
+            log.debug("Overriding truthy 'try_solv' as False on Windows for performance reasons.")
+            try_solv = False
         json_path = Path(json_path)
-        try:
-            json_stat = json_path.stat()
-        except OSError as exc:
-            log.debug("Failed to stat %s", json_path, exc_info=exc)
-            json_stat = None
-        if try_solv:
-            try:
-                solv_path = json_path.parent / f"{json_path.stem}.solv"
-                solv_stat = solv_path.stat()
-            except OSError as exc:
-                log.debug("Failed to stat %s", solv_path, exc_info=exc)
-                solv_stat = None
+        solv_path = json_path.with_suffix(".solv")
+        if state:
+            repodata_origin = RepodataOrigin(url=channel_url, etag=state.etag, mod=state.mod)
         else:
-            solv_path = None
-            solv_stat = None
-
-        if solv_stat is None and json_stat is None:
-            log.warning(
-                "No repodata found for channel %s. Solve will fail.", channel.canonical_name
+            repodata_origin = None
+        channel = Channel(channel_url)
+        channel_id = channel.canonical_name
+        if channel_id in context.custom_multichannels:
+            # In multichannels, the canonical name of a "subchannel" is the multichannel name,
+            # which makes it ambiguous for `channel::specs`. In those cases, take the channel's
+            # regular name; e.g. for repo.anaconda.com/pkgs/main, do not take defaults, but
+            # pkgs/main instead.
+            channel_id = channel.name
+        if try_solv and repodata_origin:
+            try:
+                log.debug(
+                    "Loading %s (%s) from SOLV repodata at %s", channel_id, channel_url, solv_path
+                )
+                return self.db.add_repo_from_native_serialization(
+                    path=str(solv_path),
+                    expected=repodata_origin,
+                    channel_id=channel_id,
+                    add_pip_as_python_dependency=context.add_pip_as_python_dependency,
+                )
+            except Exception as exc:
+                log.debug("Failed to load from SOLV. 
Trying JSON at %s", json_path, exc_info=exc) + try: + repo = self.db.add_repo_from_repodata_json( + path=str(json_path), + url=channel_url, + channel_id=channel_id, + add_pip_as_python_dependency=PipAsPythonDependency( + context.add_pip_as_python_dependency + ), + package_types=( + PackageTypes.TarBz2Only + if context.use_only_tar_bz2 + else PackageTypes.CondaOrElseTarBz2 + ), ) - return - if solv_stat is None: - path_to_use = json_path - elif json_stat is None: - path_to_use = solv_path # better than nothing - elif json_stat.st_mtime <= solv_stat.st_mtime: - # use solv file if it's newer than the json file - path_to_use = solv_path - else: - path_to_use = json_path + except MambaNativeException as exc: + if "does not exist" in str(exc) and context.offline: + # Ignore errors in offline mode. This is needed to pass + # tests/test_create.py::test_offline_with_empty_index_cache. + # In offline mode, with no repodata cache available, conda can still + # create a channel from the pkgs/ content. For that to work, we must + # not error out this early. If the package is still not found, the solver + # will complain that the package cannot be found. + log.warning("Could not load repodata for %s.", channel_id) + log.debug("Ignored MambaNativeException in offline mode: %s", exc, exc_info=exc) + return None + raise exc + if try_solv and repodata_origin: + try: + self.db.native_serialize_repo( + repo=repo, path=str(solv_path), metadata=repodata_origin + ) + except MambaNativeException as exc: + log.debug("Ignored SOLV writing error for %s", channel_id, exc_info=exc) + return repo - repo = api.Repo(self._pool, noauth_url, str(path_to_use), escape_channel_url(noauth_url)) + def _load_installed(self, records: Iterable[PackageRecord]) -> RepoInfo: + packages = [self._package_info_from_package_record(record) for record in records] + repo = self.db.add_repo_from_packages( + packages=packages, + name="installed", + add_pip_as_python_dependency=PipAsPythonDependency.No, + ) + self.db.set_installed_repo(repo) return _ChannelRepoInfo( - repo=repo, - channel=channel, - full_url=url, - noauth_url=noauth_url, + channel=None, repo=repo, url_w_cred="installed", url_no_cred="installed" ) - def _load_channels(self) -> dict[str, _ChannelRepoInfo]: - # 1. Obtain and deduplicate URLs from channels - urls = [] - seen_noauth = set() - for _c in self._channels: - # When .platform is defined, .urls() will ignore subdirs kw. Remove! - c = Channel(**{k: v for k, v in Channel(_c).dump().items() if k != "platform"}) - noauth_urls = c.urls(with_credentials=False, subdirs=self._subdirs) - if seen_noauth.issuperset(noauth_urls): - continue - auth_urls = c.urls(with_credentials=True, subdirs=self._subdirs) - if noauth_urls != auth_urls: # authed channel always takes precedence - urls += auth_urls - seen_noauth.update(noauth_urls) - continue - # at this point, we are handling an unauthed channel; in some edge cases, - # an auth'd variant of the same channel might already be present in `urls`. 
- # we only add them if we haven't seen them yet - for url in noauth_urls: - if url not in seen_noauth: - urls.append(url) - seen_noauth.add(url) + def _load_pkgs_cache(self, pkgs_dirs: Iterable[os.PathLike]) -> list[RepoInfo]: + repos = [] + for path in pkgs_dirs: + package_cache_data = PackageCacheData(path) + package_cache_data.load() + packages = [ + self._package_info_from_package_record(record) + for record in package_cache_data.values() + ] + repo = self.db.add_repo_from_packages(packages=packages, name=path) + path_as_url = path_to_url(path) + repos.append( + _ChannelRepoInfo( + channel=None, repo=repo, url_w_cred=path_as_url, url_no_cred=path_as_url + ) + ) + return repos - urls = tuple(dict.fromkeys(urls)) # de-duplicate + def _package_info_from_package_record(self, record: PackageRecord) -> PackageInfo: + if record.get("noarch", None) and record.noarch.value in ("python", "generic"): + noarch = NoArchType(record.noarch.value.title()) + else: + noarch = NoArchType("No") + return PackageInfo( + name=record.name, + version=record.version, + build_string=record.build or "", + build_number=record.build_number or 0, + channel=str(record.channel), + package_url=record.get("url") or "", + platform=record.subdir, + filename=record.fn or f"{record.name}-{record.version}-{record.build or ''}", + license=record.get("license") or "", + md5=record.get("md5") or "", + sha256=record.get("sha256") or "", + signatures=record.get("signatures") or "", + # conda can have list or tuple, but libmamba only accepts lists + track_features=list(record.get("track_features") or []), + depends=list(record.get("depends") or []), + constrains=list(record.get("constrains") or []), + defaulted_keys=list(record.get("defaulted_keys") or []), + noarch=noarch, + size=record.get("size") or 0, + timestamp=int((record.get("timestamp") or 0) * 1000), + ) - # 2. Fetch URLs (if needed) - Executor = ( - DummyExecutor - if context.debug or context.repodata_threads == 1 - else partial(ThreadLimitedThreadPoolExecutor, max_workers=context.repodata_threads) + def _set_repo_priorities(self): + has_priority = context.channel_priority in ( + ChannelPriority.STRICT, + ChannelPriority.FLEXIBLE, ) - with Executor() as executor: - jsons = {url: str(path) for (url, path) in executor.map(self._fetch_channel, urls)} - # 3. Create repos in same order as `urls` - index = {} - for url in urls: - info = self._json_path_to_repo_info(url, jsons[url]) - if info is not None: - index[info.noauth_url] = info + subprio_index = len(self.repos) + if has_priority: + # max channel priority value is the number of unique channels + channel_prio = len({repo.canonical_name for repo in self.repos}) + current_channel_name = self.repos[0].canonical_name - # 4. Configure priorities - set_channel_priorities(index) + for repo_info in self.repos: + if repo_info.repo is None: + continue + if has_priority: + if repo_info.canonical_name != current_channel_name: + channel_prio -= 1 + current_channel_name = repo_info.canonical_name + priority = channel_prio + else: + priority = 0 + if has_priority: + # NOTE: -- This was originally 0, but we need 1. 
+ # Otherwise, conda/conda @ test_create::test_force_remove fails :shrug: + subpriority = 1 + else: + subpriority = subprio_index + subprio_index -= 1 + + log.debug( + "Channel: %s, prio: %s : %s", + repo_info.url_no_cred, + priority, + subpriority, + ) + self.db.set_repo_priority(repo_info.repo, Priorities(priority, subpriority)) - return index + # region Repoquery + ################# - def _load_pkgs_cache(self, pkgs_dirs=None) -> Iterable[api.Repo]: - if pkgs_dirs is None: - pkgs_dirs = context.pkgs_dirs - repos = [] - for path in pkgs_dirs: - package_cache_data = PackageCacheData(path) - package_cache_data.load() - repo = self._repo_from_records(self._pool, path, package_cache_data.values()) - repos.append(repo) - return repos + def search( + self, + queries: Iterable[str | MatchSpec] | str | MatchSpec, + return_type: Literal["records", "dict", "raw"] = "records", + ) -> list[PackageRecord] | dict | QueryResult: + if isinstance(queries, (str, MatchSpec)): + queries = [queries] + queries = list(map(str, queries)) + result = Query.find(self.db, queries) + return self._process_query_result(result, return_type) - def _load_installed(self, records: Iterable[PackageRecord]) -> api.Repo: - repo = self._repo_from_records(self._pool, "installed", records) - repo.set_installed() - return repo + def depends( + self, + query: str | MatchSpec, + tree: bool = False, + return_type: Literal["records", "dict", "raw"] = "records", + ) -> list[PackageRecord] | dict | QueryResult: + query = str(query) + result = Query.depends(self.db, query, tree) + return self._process_query_result(result, return_type) def whoneeds( - self, query: str | MatchSpec, records=True - ) -> Iterable[PackageRecord] | dict | str: - result_str = self._query.whoneeds(self._prepare_query(query), self._format) - if self._format == api.QueryFormat.JSON: - return self._process_query_result(result_str, records=records) - return result_str - - def depends( - self, query: str | MatchSpec, records=True - ) -> Iterable[PackageRecord] | dict | str: - result_str = self._query.depends(self._prepare_query(query), self._format) - if self._format == api.QueryFormat.JSON: - return self._process_query_result(result_str, records=records) - return result_str - - def search(self, query: str | MatchSpec, records=True) -> Iterable[PackageRecord] | dict | str: - result_str = self._query.find(self._prepare_query(query), self._format) - if self._format == api.QueryFormat.JSON: - return self._process_query_result(result_str, records=records) - return result_str - - def explicit_pool(self, specs: Iterable[MatchSpec]) -> Iterable[str]: + self, + query: str | MatchSpec, + tree: bool = False, + return_type: Literal["records", "dict", "raw"] = "records", + ) -> list[PackageRecord] | dict | QueryResult: + query = str(query) + result = Query.whoneeds(self.db, query, tree) + return self._process_query_result(result, return_type) + + def explicit_pool(self, specs: Iterable[MatchSpec]) -> tuple[str]: """ Returns all the package names that (might) depend on the passed specs """ @@ -395,70 +576,26 @@ def explicit_pool(self, specs: Iterable[MatchSpec]) -> Iterable[str]: explicit_pool.add(record.name) return tuple(explicit_pool) - def _prepare_query(self, query: str | MatchSpec) -> str: - if isinstance(query, str): - if "[" not in query: - return query - query = MatchSpec(query) - # libmambapy.Query only supports some matchspec syntax - # https://github.com/conda/conda-libmamba-solver/issues/327 - # NOTE: Channel specs are currently ignored by libmambapy.Query searches - 
# if query.get_raw_value("channel"): - # result = f"{query.get_raw_value('channel')}::{query.name}" - # if query.version and query.get_raw_value("version").startswith((">", "<", "!", "=")): - # result += query.get_raw_value("version") - # elif query.version: - # result += f"={query.get_raw_value('version')}" - # else: - # result += "=*" - # if query.get_raw_value("build"): - # result += f"={query.get_raw_value('build')}" - # return result - if not query.get_raw_value("version"): - query = MatchSpec(query, version="*") - return query.conda_build_form() - def _process_query_result( self, - result_str, - records=True, - ) -> Iterable[PackageRecord] | dict: - result = json_load(result_str) + result: QueryResult, + return_type: Literal["records", "dict", "raw"] = "records", + ) -> list[PackageRecord] | dict | QueryResult: + if return_type == "raw": + return result + result = result.to_dict() if result.get("result", {}).get("status") != "OK": query_type = result.get("query", {}).get("type", "") query = result.get("query", {}).get("query", "") - error_msg = result.get("result", {}).get("msg", f"Faulty response: {result_str}") + error_msg = result.get("result", {}).get("msg", f"Faulty response: {result.json()}") raise ValueError(f"{query_type} query '{query}' failed: {error_msg}") - if records: + if return_type == "records": pkg_records = [] for pkg in result["result"]["pkgs"]: record = PackageRecord(**pkg) pkg_records.append(record) return pkg_records + # return_type == "dict" return result - -@lru_cache(maxsize=None) -class _LibMambaIndexForCondaBuild(LibMambaIndexHelper): - """ - See https://github.com/conda/conda-libmamba-solver/issues/386 - - conda-build needs to operate offline so the index doesn't get updated - accidentally during long build phases. However, this is only guaranteed - to work if https://github.com/conda/conda/pull/13357 is applied. Otherwise - the condarc configuration might be ignored, resulting in bad index configuration - and missing packages anyway. - """ - - def __init__(self, *args, **kwargs): - if VersionOrder(conda_version) <= VersionOrder("23.10.0"): - log.warning( - "conda-build requires conda >=23.11.0 for offline index support. " - "Falling back to online index. This might result in KeyError messages, " - "specially if the remote repodata is updated during the build phase. " - "See https://github.com/conda/conda-libmamba-solver/issues/386." 
-            )
-            super().__init__(*args, **kwargs)
-        else:
-            with env_var("CONDA_OFFLINE", "true", callback=reset_context):
-                super().__init__(*args, **kwargs)
+    # endregion
diff --git a/conda_libmamba_solver/mamba_utils.py b/conda_libmamba_solver/mamba_utils.py
index 190215c2..97a8ea31 100644
--- a/conda_libmamba_solver/mamba_utils.py
+++ b/conda_libmamba_solver/mamba_utils.py
@@ -11,13 +11,16 @@
 from __future__ import annotations
 
 import logging
+import os
+import sys
+from collections.abc import Iterable
 from functools import lru_cache
 from importlib.metadata import version
 from pathlib import Path
-from typing import TYPE_CHECKING, Iterable
+from typing import TYPE_CHECKING
 
-import libmambapy as api
+import libmambapy
 from conda.base.constants import ChannelPriority
 from conda.base.context import context
 from conda.common.compat import on_win
@@ -26,15 +29,17 @@
     from .index import _ChannelRepoInfo
 
 log = logging.getLogger(f"conda.{__name__}")
+_db_log = logging.getLogger("conda.libmamba.db")
+_libmamba_context = None
 
 
 @lru_cache(maxsize=1)
-def mamba_version():
+def mamba_version() -> str:
     return version("libmambapy")
 
 
-def _get_base_url(url, name=None):
+def _get_base_url(url: str, name: str | None = None):
     tmp = url.rsplit("/", 1)[0]
     if name:
         if tmp.endswith(name):
@@ -42,102 +47,66 @@
             return tmp
 
 
-def set_channel_priorities(index: dict[str, _ChannelRepoInfo], has_priority: bool = None):
-    """
-    This function was part of mamba.utils.load_channels originally.
-    We just split it to reuse it a bit better.
-    """
-    if not index:
-        return index
-
-    if has_priority is None:
-        has_priority = context.channel_priority in [
-            ChannelPriority.STRICT,
-            ChannelPriority.FLEXIBLE,
-        ]
-
-    subprio_index = len(index)
-    if has_priority:
-        # max channel priority value is the number of unique channels
-        channel_prio = len({info.channel.canonical_name for info in index.values()})
-        current_channel = next(iter(index.values())).channel.canonical_name
-
-    for info in index.values():
-        # add priority here
-        if has_priority:
-            if info.channel.canonical_name != current_channel:
-                channel_prio -= 1
-                current_channel = info.channel.canonical_name
-            priority = channel_prio
-        else:
-            priority = 0
-        if has_priority:
-            # NOTE: -- this is the whole reason we are vendoring this file --
-            # We are patching this from 0 to 1, starting with mamba 0.19
-            # Otherwise, test_create::test_force_remove fails :shrug:
-            subpriority = 1
-        else:
-            subpriority = subprio_index
-            subprio_index -= 1
-
-        if not context.json:
-            log.debug(
-                "Channel: %s, platform: %s, prio: %s : %s",
-                info.channel,
-                info.channel.subdir,
-                priority,
-                subpriority,
-            )
-        info.repo.set_priority(priority, subpriority)
-
-    return index
-
-
-def init_api_context(
+def init_libmamba_context(
     channels: Iterable[str] | None = None,
     platform: str = None,
     target_prefix: str = None,
-) -> api.Context:
-    # This function has to be called BEFORE 1st initialization of the context
-    api.Context.use_default_signal_handler(False)
-    api_ctx = api.Context()
+) -> libmambapy.Context:
+    global _libmamba_context
+    if _libmamba_context is None:
+        # This function has to be called BEFORE 1st initialization of the context
+        _libmamba_context = libmambapy.Context(
+            libmambapy.ContextOptions(
+                enable_signal_handling=False,
+                enable_logging=True,
+            )
+        )
+    libmamba_context = _libmamba_context
 
     # Output params
-    api_ctx.output_params.json = context.json
-    api_ctx.output_params.quiet = context.quiet
-
api_ctx.output_params.verbosity = context.verbosity - api_ctx.set_verbosity(context.verbosity) - if api_ctx.output_params.json: - api.cancel_json_output() + libmamba_context.output_params.json = context.json + if libmamba_context.output_params.json: + libmambapy.cancel_json_output(libmamba_context) + libmamba_context.output_params.quiet = context.quiet + libmamba_context.output_params.verbosity = context.verbosity + libmamba_context.set_log_level( + { + 4: libmambapy.LogLevel.TRACE, + 3: libmambapy.LogLevel.DEBUG, + 2: libmambapy.LogLevel.INFO, + 1: libmambapy.LogLevel.WARNING, + 0: libmambapy.LogLevel.ERROR, + }[context.verbosity] + ) # Prefix params - api_ctx.prefix_params.conda_prefix = context.conda_prefix - api_ctx.prefix_params.root_prefix = context.root_prefix - if on_win and target_prefix == "/": - # workaround for strange bug in libmamba transforming "/"" into "\\conda-bld" :shrug: - target_prefix = Path.cwd().parts[0] - target_prefix = target_prefix if target_prefix is not None else context.target_prefix - api_ctx.prefix_params.target_prefix = target_prefix + libmamba_context.prefix_params.conda_prefix = context.conda_prefix + libmamba_context.prefix_params.root_prefix = context.root_prefix + libmamba_context.prefix_params.target_prefix = str( + target_prefix if target_prefix is not None else context.target_prefix + ) # Networking params -- we always operate offline from libmamba's perspective - api_ctx.remote_fetch_params.user_agent = context.user_agent - api_ctx.local_repodata_ttl = context.local_repodata_ttl - api_ctx.offline = True - api_ctx.use_index_cache = True + libmamba_context.remote_fetch_params.user_agent = context.user_agent + libmamba_context.local_repodata_ttl = context.local_repodata_ttl + libmamba_context.offline = True + libmamba_context.use_index_cache = True # General params - api_ctx.add_pip_as_python_dependency = context.add_pip_as_python_dependency - api_ctx.always_yes = context.always_yes - api_ctx.dry_run = context.dry_run - api_ctx.envs_dirs = context.envs_dirs - api_ctx.pkgs_dirs = context.pkgs_dirs - api_ctx.use_lockfiles = False - api_ctx.use_only_tar_bz2 = context.use_only_tar_bz2 + libmamba_context.add_pip_as_python_dependency = context.add_pip_as_python_dependency + libmamba_context.always_yes = context.always_yes + libmamba_context.dry_run = context.dry_run + libmamba_context.envs_dirs = context.envs_dirs + libmamba_context.pkgs_dirs = context.pkgs_dirs + libmamba_context.use_lockfiles = False + libmamba_context.use_only_tar_bz2 = context.use_only_tar_bz2 # Channels and platforms - api_ctx.platform = platform if platform is not None else context.subdir - api_ctx.channels = list(channels) if channels is not None else context.channels - api_ctx.channel_alias = str(_get_base_url(context.channel_alias.url(with_credentials=True))) + libmamba_context.platform = platform if platform is not None else context.subdir + libmamba_context.channels = list(channels) if channels is not None else context.channels + libmamba_context.channel_alias = str( + _get_base_url(context.channel_alias.url(with_credentials=True)) + ) RESERVED_NAMES = {"local", "defaults"} additional_custom_channels = {} @@ -146,9 +116,12 @@ def init_api_context( additional_custom_channels[el] = _get_base_url( context.custom_channels[el].url(with_credentials=True), el ) - api_ctx.custom_channels = additional_custom_channels + libmamba_context.custom_channels = additional_custom_channels - additional_custom_multichannels = {} + additional_custom_multichannels = { + "local": 
list(context.conda_build_local_paths), + "defaults": [channel.url(with_credentials=True) for channel in context.default_channels], + } for el in context.custom_multichannels: if el not in RESERVED_NAMES: additional_custom_multichannels[el] = [] @@ -156,19 +129,56 @@ def init_api_context( additional_custom_multichannels[el].append( _get_base_url(c.url(with_credentials=True)) ) - api_ctx.custom_multichannels = additional_custom_multichannels + libmamba_context.custom_multichannels = additional_custom_multichannels - api_ctx.default_channels = [ + libmamba_context.default_channels = [ _get_base_url(x.url(with_credentials=True)) for x in context.default_channels ] - api_ctx.conda_build_local_paths = list(context.conda_build_local_paths) - if context.channel_priority is ChannelPriority.STRICT: - api_ctx.channel_priority = api.ChannelPriority.kStrict + libmamba_context.channel_priority = libmambapy.ChannelPriority.Strict elif context.channel_priority is ChannelPriority.FLEXIBLE: - api_ctx.channel_priority = api.ChannelPriority.kFlexible + libmamba_context.channel_priority = libmambapy.ChannelPriority.Flexible elif context.channel_priority is ChannelPriority.DISABLED: - api_ctx.channel_priority = api.ChannelPriority.kDisabled - - return api_ctx + libmamba_context.channel_priority = libmambapy.ChannelPriority.Disabled + + return libmamba_context + + +def logger_callback(level: libmambapy.solver.libsolv.LogLevel, msg: str, logger=_db_log): + # from libmambapy.solver.libsolv import LogLevel + # levels = { + # LogLevel.Debug: logging.DEBUG, # 0 -> 10 + # LogLevel.Warning: logging.WARNING, # 1 -> 30 + # LogLevel.Error: logging.ERROR, # 2 -> 40 + # LogLevel.Fatal: logging.FATAL, # 3 -> 50 + # } + if level.value == 0: + # This incurs a large performance hit! + logger.debug(msg) + else: + logger.log((level.value + 2) * 10, msg) + + +def palettes_and_formats() -> tuple[libmambapy.solver.ProblemsMessageFormat, libmambapy.solver.ProblemsMessageFormat]: + # _indents = ["│ ", " ", "├─ ", "└─ "] + if os.getenv("NO_COLOR"): + use_color = False + elif os.getenv("FORCE_COLOR"): + use_color = True + else: + use_color = all([sys.stdout.isatty(), sys.stdin.isatty()]) + palette_no_color = libmambapy.Palette.no_color() + problems_format_nocolor = libmambapy.solver.ProblemsMessageFormat() + problems_format_nocolor.unavailable = palette_no_color.failure + problems_format_nocolor.available = palette_no_color.success + problems_format_auto = ( + libmambapy.solver.ProblemsMessageFormat() + if use_color + else problems_format_nocolor + ) + + return problems_format_auto, problems_format_nocolor + + +problems_format_auto, problems_format_nocolor = palettes_and_formats() diff --git a/conda_libmamba_solver/plugin.py b/conda_libmamba_solver/plugin.py index f4499e4d..85cc6033 100644 --- a/conda_libmamba_solver/plugin.py +++ b/conda_libmamba_solver/plugin.py @@ -1,6 +1,10 @@ # Copyright (C) 2022 Anaconda, Inc # Copyright (C) 2023 conda # SPDX-License-Identifier: BSD-3-Clause +""" +Entry points for the conda plugin system +""" + from conda import plugins from .repoquery import configure_parser, repoquery diff --git a/conda_libmamba_solver/repoquery.py b/conda_libmamba_solver/repoquery.py index 5a2178a8..60383b50 100644 --- a/conda_libmamba_solver/repoquery.py +++ b/conda_libmamba_solver/repoquery.py @@ -2,22 +2,32 @@ # Copyright (C) 2022 Anaconda, Inc # Copyright (C) 2023 conda # SPDX-License-Identifier: BSD-3-Clause +""" +Implementation of the 'conda repoquery' subcommand. + +Based on 'mamba repoquery' for v1.x. 
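
Typical invocations of the subcommands implemented below (illustrative specs)::

    conda repoquery search "numpy>=1.24"
    conda repoquery depends python
    conda repoquery whoneeds requests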
+""" + import argparse import json import sys +from itertools import chain from conda.base.context import context from conda.cli import conda_argparse from conda.common.io import Spinner from conda.core.prefix_data import PrefixData -from libmambapy import QueryFormat +from conda.exceptions import CondaError +from conda.models.channel import Channel +from conda.models.match_spec import MatchSpec +from libmambapy import Context as LibmambaContext from .index import LibMambaIndexHelper def configure_parser(parser: argparse.ArgumentParser): package_cmds = argparse.ArgumentParser(add_help=False) - package_cmds.add_argument("package_query", help="The target package.") + package_cmds.add_argument("specs", help="The target package(s).", nargs="+") package_grp = package_cmds.add_argument_group("Subcommand options") package_grp.add_argument( "-i", @@ -26,7 +36,6 @@ def configure_parser(parser: argparse.ArgumentParser): default=True, help=argparse.SUPPRESS, ) - package_grp.add_argument( "-p", "--platform", @@ -46,6 +55,11 @@ def configure_parser(parser: argparse.ArgumentParser): action="store_true", help="Look at all channels (for depends / whoneeds).", ) + package_grp.add_argument( + "--use-cache-only", + action="store_true", + help="Search in pkgs_dirs too", + ) view_cmds = argparse.ArgumentParser(add_help=False) view_grp = view_cmds.add_argument_group("Dependency options") @@ -83,7 +97,7 @@ def configure_parser(parser: argparse.ArgumentParser): conda_argparse.add_parser_json(cmd) -def repoquery(args): +def repoquery(args: argparse.Namespace): if not args.subcmd: print("repoquery needs a subcommand (search, depends or whoneeds), e.g.:", file=sys.stderr) print(" conda repoquery search python\n", file=sys.stderr) @@ -98,7 +112,7 @@ def repoquery(args): channels = args.channel else: channels = None - if args.all_channels or (channels is None and args.subcmd == "search"): + if args.all_channels or channels is None: if channels: print("WARNING: Using all channels instead of configured channels\n", file=sys.stderr) channels = context.channels @@ -130,32 +144,79 @@ def repoquery(args): else: installed_records = () - if context.json: - query_format = QueryFormat.JSON - elif getattr(args, "tree", None): - query_format = QueryFormat.TREE - elif getattr(args, "recursive", None): - query_format = QueryFormat.RECURSIVETABLE - elif getattr(args, "pretty", None): - query_format = QueryFormat.PRETTY + if args.use_cache_only: + with Spinner( + "Collecting package metadata from pkgs_dirs", + enabled=not context.verbosity and not context.quiet, + json=context.json, + ): + index = LibMambaIndexHelper( + installed_records=(), + channels=(), + subdirs=(args.platform, "noarch"), + repodata_fn=context.repodata_fns[-1], + pkgs_dirs=context.pkgs_dirs, + ) else: - query_format = QueryFormat.TABLE - - with Spinner( - "Collecting package metadata", - enabled=not context.verbosity and not context.quiet, - json=context.json, - ): - index = LibMambaIndexHelper( - installed_records=installed_records, - channels=channels, - subdirs=(args.platform, "noarch"), - repodata_fn=context.repodata_fns[-1], - query_format=query_format, + channels_from_specs = [] + for spec in args.specs: + ms = MatchSpec(spec) + channel = ms.get_exact_value("channel") + if channel: + channels_from_specs.append(channel) + with Spinner( + "Collecting package metadata", + enabled=not context.verbosity and not context.quiet, + json=context.json, + ): + index = LibMambaIndexHelper( + installed_records=installed_records, + channels=[ + Channel(c) for c in 
chain(channels or (), dict.fromkeys(channels_from_specs))
+                ],
+                subdirs=(args.platform, "noarch"),
+                repodata_fn=context.repodata_fns[-1],
+            )
+
+    if args.subcmd == "search":
+        result = index.search(args.specs, return_type="raw")
+        if context.json:
+            print(json.dumps(result.groupby("name").to_dict(), indent=2))
+        elif getattr(args, "pretty", None):
+            print(result.pretty(show_all_builds=True))
+        else:
+            print(result.groupby("name").table())
+    elif args.subcmd == "depends":
+        if len(args.specs) > 1:
+            raise CondaError("Only one query supported for 'depends'.")
+        result = index.depends(
+            args.specs[0],
+            tree=getattr(args, "tree", False) or getattr(args, "recursive", False),
+            return_type="raw",
         )
-
-    result = getattr(index, args.subcmd)(args.package_query, records=False)
-    if context.json:
-        print(json.dumps(result, indent=2))
+        if context.json:
+            print(json.dumps(result.to_dict(), indent=2))
+        elif getattr(args, "tree", None) or getattr(args, "pretty", None):
+            # TODO: Report upstream
+            raise CondaError("--tree currently not available for this subcommand.")
+            print(result.tree(LibmambaContext.instance().graphics_params))
+        else:
+            print(result.sort("name").table())
+    elif args.subcmd == "whoneeds":
+        if len(args.specs) > 1:
+            raise CondaError("Only one query supported for 'whoneeds'.")
+        result = index.whoneeds(
+            args.specs[0],
+            tree=getattr(args, "tree", False) or getattr(args, "recursive", False),
+            return_type="raw",
+        )
+        if context.json:
+            print(json.dumps(result.to_dict(), indent=2))
+        elif getattr(args, "tree", None) or getattr(args, "pretty", None):
+            # TODO: Report upstream
+            raise CondaError("--tree currently not available for this subcommand.")
+            print(result.tree(LibmambaContext.instance().graphics_params))
+        else:
+            print(result.sort("name").table())
     else:
-        print(result)
+        raise CondaError(f"Unrecognized subcommand: {args.subcmd}")
diff --git a/conda_libmamba_solver/solver.py b/conda_libmamba_solver/solver.py
index 1fdd491b..48ee4ae7 100644
--- a/conda_libmamba_solver/solver.py
+++ b/conda_libmamba_solver/solver.py
@@ -1,12 +1,12 @@
 # Copyright (C) 2022 Anaconda, Inc
 # Copyright (C) 2023 conda
 # SPDX-License-Identifier: BSD-3-Clause
 """
 This module defines the conda.core.solve.Solver interface and its immediate helpers
-
-We can import from conda and libmambapy. `mamba` itself should NOT be imported here.
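
[editor's note] For orientation: this is the implementation conda selects when the
user passes `--solver=libmamba` (or sets `solver: libmamba` in `.condarc`), e.g.:

    conda create -n demo --dry-run --solver=libmamba python=3.12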
""" +from __future__ import annotations + import json import logging import os @@ -14,69 +16,84 @@ import sys from collections import defaultdict from contextlib import suppress -from functools import lru_cache +from functools import cache from inspect import stack +from itertools import chain from textwrap import dedent -from typing import Iterable, Mapping, Optional, Sequence, Union +from typing import TYPE_CHECKING -import libmambapy as api -from boltons.setutils import IndexedSet from conda import __version__ as _conda_version from conda.base.constants import ( - DEFAULT_CHANNELS, REPODATA_FN, UNKNOWN_CHANNEL, ChannelPriority, ) from conda.base.context import context -from conda.common.compat import on_win from conda.common.constants import NULL -from conda.common.io import Spinner, timeout +from conda.common.io import Spinner, time_recorder from conda.common.path import paths_equal -from conda.common.url import join_url, percent_decode -from conda.core.package_cache_data import PackageCacheData -from conda.core.prefix_data import PrefixData from conda.core.solve import Solver from conda.exceptions import ( CondaValueError, - InvalidMatchSpec, - InvalidSpec, PackagesNotFoundError, - ParseError, UnsatisfiableError, ) from conda.models.channel import Channel from conda.models.match_spec import MatchSpec from conda.models.records import PackageRecord, PrefixRecord from conda.models.version import VersionOrder +from libmambapy.solver import Request, Solution +from libmambapy.solver.libsolv import Solver as LibsolvSolver +from libmambapy.specs import MatchSpec as LibmambaMatchSpec +from libmambapy.specs import NoArchType from . import __version__ from .exceptions import LibMambaUnsatisfiableError -from .index import LibMambaIndexHelper, _LibMambaIndexForCondaBuild -from .mamba_utils import init_api_context, mamba_version +from .index import LibMambaIndexHelper +from .mamba_utils import ( + init_libmamba_context, + mamba_version, + problems_format_auto, + problems_format_nocolor, +) from .state import SolverInputState, SolverOutputState -from .utils import is_channel_available + +if TYPE_CHECKING: + from collections.abc import Iterable, Mapping, Sequence + + from boltons.setutils import IndexedSet + from conda.auxlib import _Null + from conda.base.constants import ( + DepsModifier, + UpdateModifier, + ) + from libmambapy.solver.libsolv import Database, UnSolvable + from libmambapy.specs import PackageInfo log = logging.getLogger(f"conda.{__name__}") class LibMambaSolver(Solver): - """ - Cleaner implementation using the ``state`` module helpers. 
- """ - MAX_SOLVER_ATTEMPTS_CAP = 10 _uses_ssc = False + @staticmethod + @cache + def user_agent() -> str: + """ + Expose this identifier to allow conda to extend its user agent if required + """ + return f"conda-libmamba-solver/{__version__} libmambapy/{mamba_version()}" + def __init__( self, - prefix, - channels, - subdirs=(), - specs_to_add=(), - specs_to_remove=(), - repodata_fn=REPODATA_FN, - command=NULL, + prefix: os.PathLike, + channels: Iterable[Channel | str], + subdirs: Iterable[str] = (), + specs_to_add: Iterable[MatchSpec | str] = (), + specs_to_remove: Iterable[MatchSpec | str] = (), + repodata_fn: str = REPODATA_FN, + command: str | _Null = NULL, ): if specs_to_add and specs_to_remove: raise ValueError( @@ -94,53 +111,32 @@ def __init__( repodata_fn=repodata_fn, command=command, ) - if self.subdirs is NULL or not self.subdirs: self.subdirs = context.subdirs + if "noarch" not in self.subdirs: + # Problem: Conda build generates a custom index which happens to "forget" about + # noarch on purpose when creating the build/host environments, since it merges + # both as if they were all in the native subdir. This causes package-not-found + # errors because we are not using the patched index. + # Fix: just add noarch to subdirs because it should always be there anyway. + self.subdirs = (*self.subdirs, "noarch") - # These three attributes are set during ._setup_solver() - self.solver = None - self._solver_options = None - - # we want to support arbitrary repodata fns, but we ignore current_repodata - if self._repodata_fn == "current_repodata.json": - log.debug(f"Ignoring repodata_fn='current_repodata.json', defaulting to {REPODATA_FN}") - self._repodata_fn = REPODATA_FN - - # Fix bug in conda.common.arg2spec and MatchSpec.__str__ - fixed_specs = [] - for spec in specs_to_add: - if isinstance(spec, PackageRecord): - spec = MatchSpec(str(spec)) - elif isinstance(spec, MatchSpec): - spec_str = str(spec) - if "::" in spec_str: - for arg in sys.argv: - if spec_str in arg: - ms_from_arg = MatchSpec(arg) - if ms_from_arg.name == spec.name: - spec = ms_from_arg - fixed_specs.append(spec) - # MatchSpec.merge sorts before merging; keep order without dups with IndexedSet - self.specs_to_add = IndexedSet(MatchSpec.merge(s for s in fixed_specs)) - - @staticmethod - @lru_cache(maxsize=None) - def user_agent(): - """ - Expose this identifier to allow conda to extend its user agent if required - """ - return f"conda-libmamba-solver/{__version__} libmambapy/{mamba_version()}" + self._repodata_fn = self._maybe_ignore_current_repodata() + self._libmamba_context = init_libmamba_context( + channels=tuple(c.canonical_name for c in self.channels), + platform=next(s for s in self.subdirs if s != "noarch"), + target_prefix=str(self.prefix), + ) def solve_final_state( self, - update_modifier=NULL, - deps_modifier=NULL, - prune=NULL, - ignore_pinned=NULL, - force_remove=NULL, - should_retry_solve=False, - ): + update_modifier: UpdateModifier | _Null = NULL, + deps_modifier: DepsModifier | _Null = NULL, + prune: bool | _Null = NULL, + ignore_pinned: bool | _Null = NULL, + force_remove: bool | _Null = NULL, + should_retry_solve: bool = False, + ) -> IndexedSet[PackageRecord]: self._log_info() in_state = SolverInputState( prefix=self.prefix, @@ -152,94 +148,30 @@ def solve_final_state( force_remove=force_remove, command=self._command, ) - out_state = SolverOutputState(solver_input_state=in_state) # These tasks do _not_ require a solver... 
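+        # (e.g. requests that can be answered from the prefix state alone,
+        # such as forced removals, return early here)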
- none_or_final_state = out_state.early_exit() - if none_or_final_state is not None: - return none_or_final_state - - # From now on we _do_ require a solver and the index - subdirs = self.subdirs - if self._called_from_conda_build(): - log.info("Using solver via 'conda.plan.install_actions' (probably conda build)") - # Problem: Conda build generates a custom index which happens to "forget" about - # noarch on purpose when creating the build/host environments, since it merges - # both as if they were all in the native subdir. this causes package-not-found - # errors because we are not using the patched index. - # Fix: just add noarch to subdirs. - if "noarch" not in subdirs: - subdirs = *subdirs, "noarch" - # We need to recover the local dirs (conda-build's local, output_folder, etc) - # from the index. This is a bit of a hack, but it works. - conda_bld_channels = { - rec.channel: None for rec in self._index if rec.channel.scheme == "file" - } - # Cache indices for conda-build, it gets heavy otherwise - IndexHelper = _LibMambaIndexForCondaBuild - else: - IndexHelper = LibMambaIndexHelper - conda_bld_channels = () - - all_channels = [ - *conda_bld_channels, - *self.channels, - *in_state.channels_from_specs(), - ] - override = (getattr(context, "_argparse_args", None) or {}).get("override_channels") - if not os.getenv("CONDA_LIBMAMBA_SOLVER_NO_CHANNELS_FROM_INSTALLED") and not override: - # see https://github.com/conda/conda-libmamba-solver/issues/108 - all_urls = [url for c in all_channels for url in Channel(c).urls(False)] - installed_channels = in_state.channels_from_installed(seen=all_urls) - for channel in installed_channels: - # Only add to list if resource is available; check has timeout=1s - if timeout(1, is_channel_available, channel.base_url, default_return=False): - all_channels.append(channel) - all_channels.extend(in_state.maybe_free_channel()) - - # Aggregate channels and subdirs - deduped_channels = {} - for channel in all_channels: - if channel_platform := getattr(channel, "platform", None): - if channel_platform not in subdirs: - log.info( - "Channel %s defines platform %s which is not part of subdirs=%s. " - "Ignoring platform attribute...", - channel, - channel_platform, - subdirs, - ) - # Remove 'Channel.platform' to avoid missing subdirs. Channel.urls() will ignore - # our explicitly passed subdirs if .platform is defined! 
- channel = Channel(**{k: v for k, v in channel.dump().items() if k != "platform"}) - deduped_channels[channel] = None - all_channels = tuple(deduped_channels) - - # Now have all the info we need to initialize the libmamba context - init_api_context( - channels=[c.canonical_name for c in all_channels], - platform=next(s for s in self.subdirs if s != "noarch"), - target_prefix=str(self.prefix), - ) + maybe_final_state = out_state.early_exit() + if maybe_final_state is not None: + return maybe_final_state + channels = self._collect_channel_list(in_state) + conda_build_channels = self._collect_channels_subdirs_from_conda_build(seen=set(channels)) with Spinner( - self._spinner_msg_metadata(all_channels, conda_bld_channels=conda_bld_channels), + self._collect_all_metadata_spinner_message(channels, conda_build_channels), enabled=not context.verbosity and not context.quiet, json=context.json, ): - index = IndexHelper( - installed_records=(*in_state.installed.values(), *in_state.virtual.values()), - channels=all_channels, - subdirs=subdirs, - repodata_fn=self._repodata_fn, - load_pkgs_cache=context.offline, + index = self._collect_all_metadata( + channels=channels, + conda_build_channels=conda_build_channels, + subdirs=self.subdirs, + in_state=in_state, ) - if conda_bld_channels: - index.reload_local_channels() + out_state.check_for_pin_conflicts(index) with Spinner( - self._spinner_msg_solving(), + self._solving_loop_spinner_message(), enabled=not context.verbosity and not context.quiet, json=context.json, ): @@ -250,18 +182,28 @@ def solve_final_state( solution = out_state.current_solution # Check whether conda can be updated; this is normally done in .solve_for_diff() - # but we are doing it now so we can reuse in_state and friends + # but we are doing it now so we can reuse the index self._notify_conda_outdated(None, index, solution) return solution - def _spinner_msg_metadata(self, channels: Iterable[Channel], conda_bld_channels=()): + # region Metadata collection + ############################ + + def _collect_all_metadata_spinner_message( + self, + channels: Iterable[Channel], + conda_build_channels: Iterable[Channel | str] = (), + ) -> str: if self._called_from_conda_build(): msg = "Reloading output folder" - if conda_bld_channels: - names = [Channel(c).canonical_name for c in conda_bld_channels] + if conda_build_channels: + names = list( + dict.fromkeys([Channel(c).canonical_name for c in conda_build_channels]) + ) msg += f" ({', '.join(names)})" return msg + canonical_names = list(dict.fromkeys([c.canonical_name for c in channels])) canonical_names_dashed = "\n - ".join(canonical_names) return ( @@ -271,7 +213,76 @@ def _spinner_msg_metadata(self, channels: Iterable[Channel], conda_bld_channels= f"Collecting package metadata ({self._repodata_fn})" ) - def _spinner_msg_solving(self): + def _collect_channel_list(self, in_state: SolverInputState) -> list[Channel]: + # Aggregate channels and subdirs + deduped_channels = {} + for channel in chain( + self.channels, in_state.channels_from_specs(), in_state.maybe_free_channel() + ): + if channel_platform := getattr(channel, "platform", None): + if channel_platform not in self.subdirs: + log.info( + "Channel %s defines platform %s which is not part of subdirs=%s. " + "Ignoring platform attribute...", + channel, + channel_platform, + self.subdirs, + ) + # Remove 'Channel.platform' to avoid missing subdirs. Channel.urls() will ignore + # our explicitly passed subdirs if .platform is defined! 
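+                # Re-creating the channel from its dump() minus 'platform' keeps
+                # every other field (scheme, location, name, auth) intact.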
+ channel = Channel(**{k: v for k, v in channel.dump().items() if k != "platform"}) + deduped_channels[channel] = None + return list(deduped_channels) + + def _collect_channels_subdirs_from_conda_build( + self, + seen: set[Channel] | None = None, + ) -> list[Channel]: + if self._called_from_conda_build(): + seen = seen or set() + # We need to recover the local dirs (conda-build's local, output_folder, etc) + # from the index. This is a bit of a hack, but it works. + conda_build_channels = {} + for record in self._index or {}: + if record.channel.scheme == "file": + # Remove 'Channel.platform' to avoid missing subdirs. Channel.urls() + # will ignore our explicitly passed subdirs if .platform is defined! + channel = Channel( + **{k: v for k, v in record.channel.dump().items() if k != "platform"} + ) + if channel not in seen: + conda_build_channels.setdefault(channel) + return list(conda_build_channels) + return [] + + @time_recorder(module_name=__name__) + def _collect_all_metadata( + self, + channels: Iterable[Channel], + conda_build_channels: Iterable[Channel], + subdirs: Iterable[str], + in_state: SolverInputState, + ) -> LibMambaIndexHelper: + index = LibMambaIndexHelper( + channels=[*conda_build_channels, *channels], + subdirs=subdirs, + repodata_fn=self._repodata_fn, + installed_records=( + *in_state.installed.values(), + *in_state.virtual.values(), + ), + pkgs_dirs=context.pkgs_dirs if context.offline else (), + ) + for channel in conda_build_channels: + index.reload_channel(channel) + return index + + # endregion + + # region Solving + ################ + + def _solving_loop_spinner_message(self) -> str: """This shouldn't be our responsibility, but the CLI / app's...""" prefix_name = os.path.basename(self.prefix) if self._called_from_conda_build(): @@ -283,49 +294,16 @@ def _spinner_msg_solving(self): return "Getting pinned dependencies" return "Solving environment" - def _max_attempts(self, in_state: SolverInputState, default: int = 1): - from_env_var = os.environ.get("CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS") - installed_count = len(in_state.installed) - if from_env_var: - try: - max_attempts_from_env = int(from_env_var) - except ValueError: - raise CondaValueError( - f"CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS='{from_env_var}'. Must be int." - ) - if max_attempts_from_env < 1: - raise CondaValueError( - f"CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS='{max_attempts_from_env}'. Must be >=1." - ) - elif max_attempts_from_env > installed_count: - log.warning( - "CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS='%s' is higher than the number of " - "installed packages (%s). 
Using that one instead.", - max_attempts_from_env, - installed_count, - ) - return installed_count - else: - return max_attempts_from_env - elif in_state.update_modifier.FREEZE_INSTALLED and installed_count: - # this the default, but can be overriden with --update-specs - # we cap at MAX_SOLVER_ATTEMPTS_CAP attempts to avoid things - # getting too slow in large environments - return min(self.MAX_SOLVER_ATTEMPTS_CAP, installed_count) - else: - return default - + @time_recorder(module_name=__name__) def _solving_loop( self, in_state: SolverInputState, out_state: SolverOutputState, index: LibMambaIndexHelper, - ): - solved = False + ) -> IndexedSet[PackageRecord]: for attempt in range(1, self._max_attempts(in_state) + 1): - log.debug("Starting solver attempt %s", attempt) try: - solved = self._solve_attempt(in_state, out_state, index, attempt=attempt) + solved, outcome = self._solve_attempt(in_state, out_state, index, attempt=attempt) if solved: break except (UnsatisfiableError, PackagesNotFoundError): @@ -346,128 +324,104 @@ def _solving_loop( { name: record.to_match_spec() for name, record in in_state.installed.items() - # TODO: These conditions might not be needed here if not record.is_unmanageable - # or name not in in_state.history - # or name not in in_state.requested - # or name not in in_state.pinned } ) - # we only check this for "desperate" strategies in _specs_to_tasks - if self._command in (None, NULL): - self._command = "last_solve_attempt" - else: - self._command += "+last_solve_attempt" - solved = self._solve_attempt(in_state, out_state, index, attempt=attempt + 1) + solved, outcome = self._solve_attempt(in_state, out_state, index, attempt=attempt + 1) if not solved: - message = self._prepare_problems_message(pins=out_state.pins) + message = self._prepare_problems_message(outcome, index.db, out_state) exc = LibMambaUnsatisfiableError(message) exc.allow_retry = False raise exc # We didn't fail? 
Nice, let's return the calculated state - self._export_solved_records(in_state, out_state, index) + self._export_solution(index, out_state, outcome) # Run post-solve tasks out_state.post_solve(solver=self) return out_state - def _log_info(self): - log.info("conda version: %s", _conda_version) - log.info("conda-libmamba-solver version: %s", __version__) - log.info("libmambapy version: %s", mamba_version()) - log.info("Target prefix: %r", self.prefix) - log.info("Command: %s", sys.argv) - - def _setup_solver(self, pool: api.Pool): - self._solver_options = solver_options = [ - (api.SOLVER_FLAG_ALLOW_DOWNGRADE, 1), - ] - if context.channel_priority is ChannelPriority.STRICT: - solver_options.append((api.SOLVER_FLAG_STRICT_REPO_PRIORITY, 1)) - if self.specs_to_remove and self._command in ("remove", None, NULL): - solver_options.append((api.SOLVER_FLAG_ALLOW_UNINSTALL, 1)) - - self.solver = api.Solver(pool, self._solver_options) - def _solve_attempt( self, in_state: SolverInputState, out_state: SolverOutputState, index: LibMambaIndexHelper, attempt: int = 1, - ): - self._setup_solver(index._pool) - + ) -> tuple[bool, Solution | UnSolvable]: log.info("Solver attempt: #%d", attempt) - log.debug("Current conflicts (including learnt ones): %s", out_state.conflicts) - - # Check pin-spec compatibility - if attempt == 1: - out_state.check_for_pin_conflicts(index) - - # ## Create tasks for the solver - tasks = self._specs_to_tasks(in_state, out_state) - tasks_as_str = json.dumps({k[0]: v for k, v in tasks.items()}, indent=2) - log.info("Solver tasks:\n%s", tasks_as_str) - n_pins = 0 - for (task_name, task_type), specs in tasks.items(): - log.debug("Adding task %s", task_name) - if task_name == "ADD_PIN" and attempt == 1: - # pins only need to be added once; since they persist in the pool - # adding them more times results in issues like #354 - for spec in specs: - n_pins += 1 - self.solver.add_pin(spec) - out_state.pins[f"pin-{n_pins}"] = spec - else: - try: - self.solver.add_jobs(specs, task_type) - except RuntimeError as exc: - raise InvalidSpec(str(exc)) - - # ## Run solver - solved = self.solver.solve() - - if solved: + log.debug("Current conflicts (including learnt ones): %r", out_state.conflicts) + flags = self._solver_flags(in_state) + jobs = self._specs_to_request_jobs(in_state, out_state) + request = Request(jobs=jobs, flags=flags) + solver = LibsolvSolver() + outcome = solver.solve(index.db, request) + if isinstance(outcome, Solution): out_state.conflicts.clear() - return solved - - problems = self.solver.problems_to_str() + return True, outcome old_conflicts = out_state.conflicts.copy() - new_conflicts = self._maybe_raise_for_problems( - problems, old_conflicts, out_state.pins, index._channels - ) - log.debug( - "Attempt %d failed with %s conflicts:\n%s", attempt, len(new_conflicts), problems - ) + new_conflicts = self._maybe_raise_for_problems(outcome, index, out_state, old_conflicts) + if log.isEnabledFor(logging.DEBUG): + problems_as_str = outcome.problems_to_str(index.db) + log.debug( + "Attempt %d failed with %s conflicts:\n%s", + attempt, + len(new_conflicts), + problems_as_str, + ) out_state.conflicts.update(new_conflicts) - return False + return False, outcome + + def _solver_flags(self, in_state: SolverInputState) -> Request.Flags: + flags = { + "allow_downgrade": True, + # About flags.allow_uninstall = True: + # We used to set this to False on a global basis and then add jobs + # individually with ALLOW_UNINSTALL=True. Libmamba v2 has a Keep job instead now. 
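+            # (Keep takes over what USERINSTALLED jobs did before: matching
+            # installed packages are not removed as orphaned dependencies)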
+ "allow_uninstall": True, + "force_reinstall": in_state.force_reinstall, + "keep_dependencies": not in_state.prune, + "keep_user_specs": True, + "order_request": False, # we do this ourselves + "strict_repo_priority": context.channel_priority is ChannelPriority.STRICT, + } + if log.isEnabledFor(logging.DEBUG): + log.debug("Using solver flags:\n%s", json.dumps(flags, indent=2)) + return Request.Flags(**flags) - def _specs_to_tasks(self, in_state: SolverInputState, out_state: SolverOutputState): + def _specs_to_request_jobs( + self, + in_state: SolverInputState, + out_state: SolverOutputState, + ) -> list[Request.Job]: if in_state.is_removing: - return self._specs_to_tasks_remove(in_state, out_state) + jobs = self._specs_to_request_jobs_remove(in_state, out_state) elif self._called_from_conda_build(): - return self._specs_to_tasks_conda_build(in_state, out_state) + jobs = self._specs_to_request_jobs_conda_build(in_state, out_state) else: - return self._specs_to_tasks_add(in_state, out_state) - - @staticmethod - def _spec_to_str(spec): - """ - Workarounds for Matchspec str-roundtrip limitations. - - Note: this might still fail for specs with local channels and version=*: - file://path/to/channel::package_name=*=*buildstr* - """ - if spec.original_spec_str and spec.original_spec_str.startswith("file://"): - return spec.original_spec_str - if spec.get("build") and not spec.get("version"): - spec = MatchSpec(spec, version="*") - return str(spec) - - def _specs_to_tasks_add(self, in_state: SolverInputState, out_state: SolverOutputState): + jobs = self._specs_to_request_jobs_add(in_state, out_state) + + request_jobs = [] + json_friendly = {} + for JobType, specs in jobs.items(): + for idx, conda_spec in enumerate(specs, 1): + libmamba_spec = self._conda_spec_to_libmamba_spec(conda_spec) + request_jobs.append(JobType(libmamba_spec)) + if log.isEnabledFor(logging.INFO): + json_friendly.setdefault(JobType.__name__, []).append(str(conda_spec)) + if JobType == Request.Pin: + conda_spec = MatchSpec(conda_spec) + out_state.pins[f"pin-{idx}"] = conda_spec + if log.isEnabledFor(logging.INFO): + json_str = json.dumps(json_friendly, indent=2) + log.info("The solver will handle these requests:\n%s", json_str) + return request_jobs + + def _specs_to_request_jobs_add( + self, + in_state: SolverInputState, + out_state: SolverOutputState, + ) -> dict[Request, list[MatchSpec | str]]: tasks = defaultdict(list) # Protect history and aggressive updates from being uninstalled if possible. 
From libsolv @@ -497,36 +451,26 @@ def _specs_to_tasks_add(self, in_state: SolverInputState, out_state: SolverOutpu if installed_python and to_be_installed_python: python_version_might_change = not to_be_installed_python.match(installed_python) - # Task types - ADD_PIN = "ADD_PIN", api.SOLVER_NOOP - INSTALL = "INSTALL", api.SOLVER_INSTALL - UPDATE = "UPDATE", api.SOLVER_UPDATE - ALLOW_UNINSTALL = "ALLOW_UNINSTALL", api.SOLVER_ALLOWUNINSTALL - USERINSTALLED = "USERINSTALLED", api.SOLVER_USERINSTALLED - LOCK = "LOCK", api.SOLVER_LOCK | api.SOLVER_WEAK - for name in out_state.specs: installed: PackageRecord = in_state.installed.get(name) if installed: - installed_spec_str = self._spec_to_str( - self._check_spec_compat(installed.to_match_spec()) - ) + installed_spec = self._check_spec_compat(installed.to_match_spec()) else: - installed_spec_str = None + installed_spec = None requested: MatchSpec = self._check_spec_compat(in_state.requested.get(name)) history: MatchSpec = self._check_spec_compat(in_state.history.get(name)) pinned: MatchSpec = self._check_spec_compat(in_state.pinned.get(name)) conflicting: MatchSpec = self._check_spec_compat(out_state.conflicts.get(name)) if name in user_installed and not in_state.prune and not conflicting: - tasks[USERINSTALLED].append(installed_spec_str) + tasks[Request.Keep].append(installed_spec) # These specs are explicit in some sort of way if pinned and not pinned.is_name_only_spec: # these are the EXPLICIT pins; conda also uses implicit pinning to # constrain updates too but those can be overridden in case of conflicts. # name-only pins are treated as locks when installed, see below - tasks[ADD_PIN].append(self._spec_to_str(pinned)) + tasks[Request.Pin].append(pinned) # in libmamba, pins and installs are compatible tasks (pin only constrains, # does not 'request' a package). 
In classic, pins were actually targeted installs # so they were exclusive @@ -535,35 +479,46 @@ def _specs_to_tasks_add(self, in_state: SolverInputState, out_state: SolverOutpu # for name-only specs, this is a no-op; we already added the pin above # but we will constrain it again in the install task to have better # error messages if not solvable - spec_str = self._spec_to_str(pinned) + spec = pinned else: - spec_str = self._spec_to_str(requested) + spec = requested if installed: - tasks[UPDATE].append(spec_str) - tasks[ALLOW_UNINSTALL].append(name) + tasks[Request.Update].append(spec) + if name not in (MatchSpec(spec).name for spec in tasks[Request.Keep]): + tasks[Request.Keep].append(name) else: - tasks[INSTALL].append(spec_str) + tasks[Request.Install].append(spec) elif name in in_state.always_update: - tasks[UPDATE].append(name) - tasks[ALLOW_UNINSTALL].append(name) + tasks[Request.Update].append(name) # These specs are "implicit"; the solver logic massages them for better UX # as long as they don't cause trouble elif in_state.prune: continue elif name == "python" and installed and not pinned: pyver = ".".join(installed.version.split(".")[:2]) - tasks[ADD_PIN].append(f"python {pyver}.*") + tasks[Request.Pin].append(f"python {pyver}.*") elif history: if conflicting and history.strictness == 3: # relax name-version-build (strictness=3) history specs that cause conflicts # this is called neutering and makes test_neutering_of_historic_specs pass - spec = f"{name} {history.version}.*" if history.version else name - tasks[INSTALL].append(spec) + version = str(history.version or "") + if version.startswith("=="): + spec_str = f"{name} {version[2:]}" + elif version.startswith(("!=", ">", "<")): + spec_str = f"{name} {version}" + elif version: + spec_str = f"{name} {version}.*" + else: + spec_str = name + tasks[Request.Install].append(spec_str) else: - tasks[INSTALL].append(self._spec_to_str(history)) + tasks[Request.Install].append(history) elif installed: if conflicting: - tasks[ALLOW_UNINSTALL].append(name) + # NOTE: We don't do anything now with conflicting installed. + # We rely on Flags.allow_uninstall = True doing the right thing. + # We are protecting important things with Keep or Freeze instead. 
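+                    # (previously this appended an explicit ALLOW_UNINSTALL
+                    # job for each conflicting name)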
+ pass else: # we freeze everything else as installed lock = in_state.update_modifier.FREEZE_INSTALLED @@ -576,24 +531,29 @@ def _specs_to_tasks_add(self, in_state: SolverInputState, out_state: SolverOutpu lock = False break if lock: - tasks[LOCK].append(installed_spec_str) + tasks[Request.Freeze].append(installed_spec) + # enabling this else branch makes + # conda/conda's tests/core/test_solve.py::test_freeze_deps_1[libmamba] fail + # else: + # tasks[Request.Keep].append(name) # Sort tasks by priority # This ensures that more important tasks are added to the solver first returned_tasks = {} for task_type in ( - ADD_PIN, - INSTALL, - UPDATE, - ALLOW_UNINSTALL, - USERINSTALLED, - LOCK, + Request.Pin, + Request.Install, + Request.Update, + Request.Keep, + Request.Freeze, ): if task_type in tasks: returned_tasks[task_type] = tasks[task_type] return returned_tasks - def _specs_to_tasks_remove(self, in_state: SolverInputState, out_state: SolverOutputState): + def _specs_to_request_jobs_remove( + self, in_state: SolverInputState, out_state: SolverOutputState + ) -> dict[Request, list[MatchSpec | str]]: # TODO: Consider merging add/remove in a single logic this so there's no split tasks = defaultdict(list) @@ -604,67 +564,105 @@ def _specs_to_tasks_remove(self, in_state: SolverInputState, out_state: SolverOu # MatchSpecs constructed from PackageRecords get parsed too # strictly if exported via str(). Use .conda_build_form() directly. spec = record.to_match_spec().conda_build_form() - tasks[("USERINSTALLED", api.SOLVER_USERINSTALLED)].append(spec) + tasks[Request.Keep].append(spec) # No complications here: delete requested and their deps # TODO: There are some flags to take care of here, namely: # --all # --no-deps # --deps-only - ERASE = ("ERASE | CLEANDEPS", api.SOLVER_ERASE | api.SOLVER_CLEANDEPS) for name, spec in in_state.requested.items(): spec = self._check_spec_compat(spec) - tasks[ERASE].append(str(spec)) + tasks[Request.Remove].append(str(spec)) return dict(tasks) - def _specs_to_tasks_conda_build( + def _specs_to_request_jobs_conda_build( self, in_state: SolverInputState, out_state: SolverOutputState - ): + ) -> dict[Request, list[MatchSpec | str]]: tasks = defaultdict(list) - INSTALL = "INSTALL", api.SOLVER_INSTALL for name, spec in in_state.requested.items(): if name.startswith("__"): continue spec = self._check_spec_compat(spec) spec = self._fix_version_field_for_conda_build(spec) - tasks[INSTALL].append(spec.conda_build_form()) + tasks[Request.Install].append(spec.conda_build_form()) return dict(tasks) - @staticmethod - def _fix_version_field_for_conda_build(spec: MatchSpec): - """Fix taken from mambabuild""" - if spec.version: - only_dot_or_digit_re = re.compile(r"^[\d\.]+$") - version_str = str(spec.version) - if re.match(only_dot_or_digit_re, version_str): - spec_fields = spec.conda_build_form().split() - if version_str.count(".") <= 1: - spec_fields[1] = version_str + ".*" - else: - spec_fields[1] = version_str + "*" - return MatchSpec(" ".join(spec_fields)) - return spec + # endregion - @staticmethod - def _str_to_matchspec(spec: Union[str, Sequence[str]]): - try: - if isinstance(spec, str): - name, version, build = spec.rsplit("-", 2) - return MatchSpec(name=name, version=version, build=build) - else: - kwargs = {"name": spec[0].rstrip(",")} - if len(spec) >= 2: - kwargs["version"] = spec[1].rstrip(",") - if len(spec) == 3: - kwargs["build"] = spec[2].rstrip(",") - return MatchSpec(**kwargs) - except Exception as exc: - raise ValueError(f"Could not parse spec: {spec}") from 
exc + # region Export to conda + ######################## + + def _export_solution( + self, + index: LibMambaIndexHelper, + out_state: SolverOutputState, + solution: Solution, + ) -> SolverOutputState: + for action in solution.actions: + record_to_install: PackageInfo = getattr(action, "install", None) + record_to_remove: PackageInfo = getattr(action, "remove", None) + if record_to_install: + if record_to_install.name.startswith("__"): + continue + record = self._package_info_to_package_record(record_to_install, index) + out_state.records[record.name] = record + elif record_to_remove: + if record_to_remove.name.startswith("__"): + continue + record = self._package_info_to_package_record(record_to_remove, index) + out_state.records.pop(record.name, None) + return out_state + + def _package_info_to_package_record( + self, + pkg: PackageInfo, + index: LibMambaIndexHelper, + ) -> PackageRecord: + if pkg.noarch == NoArchType.Python: + noarch = "python" + elif pkg.noarch == NoArchType.Generic: + noarch = "generic" + else: + noarch = None + # The package download logic needs the URL with credentials + for repo_info in index.repos: + if pkg.package_url.startswith(repo_info.url_no_cred): + url = pkg.package_url.replace(repo_info.url_no_cred, repo_info.url_w_cred) + break + else: + url = pkg.package_url + return PackageRecord( + name=pkg.name, + version=pkg.version, + build=pkg.build_string, # NOTE: Different attribute name + build_number=pkg.build_number, + channel=pkg.channel, + url=url, + subdir=pkg.platform, # NOTE: Different attribute name + fn=pkg.filename, # NOTE: Different attribute name + license=pkg.license, + md5=pkg.md5, + sha256=pkg.sha256, + signatures=pkg.signatures, + track_features=pkg.track_features, + depends=pkg.dependencies, # NOTE: Different attribute name + constrains=pkg.constrains, + defaulted_keys=pkg.defaulted_keys, + noarch=noarch, + size=pkg.size, + timestamp=pkg.timestamp, + ) + + # endregion + + # region Error reporting + ######################## @classmethod - def _parse_problems(cls, problems: str) -> Mapping[str, MatchSpec]: + def _parse_problems(cls, unsolvable: UnSolvable, db: Database) -> Mapping[str, MatchSpec]: """ Problems can signal either unsatisfiability or unavailability. First will raise LibmambaUnsatisfiableError. @@ -672,33 +670,48 @@ def _parse_problems(cls, problems: str) -> Mapping[str, MatchSpec]: Libmamba can return spec strings in two formats: - With dashes, e.g. package-1.2.3-h5487548_0 - - à la conda-build, e.g. package 1.2.* + - à la conda-build, e.g. package 1.2.* * - just names, e.g. 
package """ conflicts = [] not_found = [] - problem_lines = problems.splitlines()[1:] - for line in problem_lines: - line = line.strip() + problems = [] + has_unsupported = False + for problem in unsolvable.problems(db): + if problem == "unsupported request": + has_unsupported = True + else: + problems.append(problem) + if has_unsupported: # we put it at the end to prioritize other more meaningful problems + problems.append("unsupported request") + + try: + explained_problems = unsolvable.explain_problems(db, problems_format_nocolor) + except Exception as exc: + log.debug("Cannot explain problems", exc_info=exc) + explained_problems = "" + for line in problems: words = line.split() - if not line.startswith("- "): - continue if "none of the providers can be installed" in line: - if words[1] != "package" or words[3] != "requires": + if words[0] != "package" or words[2] != "requires": raise ValueError(f"Unknown message: {line}") - conflicts.append(cls._str_to_matchspec(words[2])) + conflicts.append(cls._matchspec_from_error_str(words[1])) end = words.index("but") - conflicts.append(cls._str_to_matchspec(words[4:end])) - elif "- nothing provides" in line: - marker = next((i for (i, w) in enumerate(words) if w == "needed"), None) - if marker: - conflicts.append(cls._str_to_matchspec(words[-1])) - start = 3 if marker == 4 else 4 - not_found.append(cls._str_to_matchspec(words[start:marker])) + conflicts.append(cls._matchspec_from_error_str(words[3:end])) + elif "nothing provides" in line: + start, marker = None, None + for i, word in enumerate(words): + if word == "needed": + marker = i + elif word == "provides": + start = i + 1 + if marker is not None: + conflicts.append(cls._matchspec_from_error_str(words[-1])) + not_found.append(cls._matchspec_from_error_str(words[start:marker])) elif "has constraint" in line and "conflicting with" in line: # package libzlib-1.2.11-h4e544f5_1014 has constraint zlib 1.2.11 *_1014 # conflicting with zlib-1.2.13-h998d150_0 - conflicts.append(cls._str_to_matchspec(words[-1])) + conflicts.append(cls._matchspec_from_error_str(words[-1])) elif "cannot install both pin-" in line and "and pin-" in line: # a pin is in conflict with another pin pin_a = words[3].rsplit("-", 1)[0] @@ -707,7 +720,17 @@ def _parse_problems(cls, problems: str) -> Mapping[str, MatchSpec]: conflicts.append(MatchSpec(pin_b)) elif "is excluded by strict repo priority" in line: # package python-3.7.6-h0371630_2 is excluded by strict repo priority - conflicts.append(cls._str_to_matchspec(words[2])) + conflicts.append(cls._matchspec_from_error_str(words[1])) + elif line == "unsupported request": + # libmamba v2 has this message for package not found errors + # we need to double check with the explained problem + for explained_line in explained_problems.splitlines(): + explained_line = explained_line.lstrip("│├└─ ").strip() + explained_words = explained_line.split() + if "does not exist" in explained_line and "which" not in explained_line: + end = explained_words.index("does") + not_found.append(cls._matchspec_from_error_str(explained_words[:end])) + break else: log.debug("! 
Problem line not recognized: %s", line) @@ -718,37 +741,29 @@ def _parse_problems(cls, problems: str) -> Mapping[str, MatchSpec]: def _maybe_raise_for_problems( self, - problems: Optional[Union[str, Mapping]] = None, + unsolvable: UnSolvable, + index: LibMambaIndexHelper, + out_state: SolverOutputState, previous_conflicts: Mapping[str, MatchSpec] = None, - pins: Mapping[str, MatchSpec] = None, - channels: Iterable[Channel] = (), ): - if self.solver is None: - raise RuntimeError("Solver is not initialized. Call `._setup_solver()` first.") - - if problems is None: - problems = self.solver.problems_to_str() - if isinstance(problems, str): - parsed_problems = self._parse_problems(problems) - + parsed_problems = self._parse_problems(unsolvable, index.db) # We allow conda-build (if present) to process the exception early self._maybe_raise_for_conda_build( {**parsed_problems["conflicts"], **parsed_problems["not_found"]}, - message=self._prepare_problems_message(), + message=self._prepare_problems_message(unsolvable, index.db, out_state), ) unsatisfiable = parsed_problems["conflicts"] not_found = parsed_problems["not_found"] if not unsatisfiable and not_found: - log.debug( - "Inferred PackagesNotFoundError %s from conflicts:\n%s", - tuple(not_found.keys()), - problems, - ) + if log.isEnabledFor(logging.DEBUG): + log.debug( + "Inferred PackagesNotFoundError %s from conflicts:\n%s", + tuple(not_found.keys()), + unsolvable.explain_problems(index.db, problems_format_nocolor), + ) # This is not a conflict, but a missing package in the channel - exc = PackagesNotFoundError( - tuple(not_found.values()), tuple(dict.fromkeys(channels or self.channels)) - ) + exc = PackagesNotFoundError(tuple(not_found.values()), tuple(index.channels)) exc.allow_retry = False raise exc @@ -764,24 +779,22 @@ def _maybe_raise_for_problems( if (previous and (previous_set == current_set)) or len(diff) >= 10: # We have same or more (up to 10) unsatisfiable now! Abort to avoid recursion - message = self._prepare_problems_message(pins=pins) + message = self._prepare_problems_message(unsolvable, index.db, out_state) exc = LibMambaUnsatisfiableError(message) # do not allow conda.cli.install to try more things exc.allow_retry = False raise exc return unsatisfiable - def _prepare_problems_message(self, pins=None): - message = self.solver.problems_to_str() + def _prepare_problems_message( + self, unsolvable: UnSolvable, db: Database, out_state: SolverOutputState + ) -> str: + message = unsolvable.problems_to_str(db) explain = True if " - " not in message: # This makes 'explain_problems()' crash. Anticipate. message = "Failed with empty error message." explain = False - elif "unsupported request" in message: - # This error makes 'explain_problems()' crash. Anticipate. - log.info("Failed to explain problems. Unsupported request.") - explain = False elif "is excluded by strict repo priority" in message: # This will cause a lot of warnings until implemented in detail explanations log.info("Skipping error explanation. 
Excluded by strict repo priority.") @@ -789,16 +802,15 @@ def _prepare_problems_message(self, pins=None): if explain: try: - explained_errors = self.solver.explain_problems() + explained_errors = unsolvable.explain_problems(db, problems_format_auto) message += "\n" + explained_errors except Exception as exc: log.warning("Failed to explain problems", exc_info=exc) - - if pins and " pin-" in message: # add info about pins for easier debugging + if out_state.pins and "pin on " in message: # add info about pins for easier debugging pin_message = "Pins seem to be involved in the conflict. Currently pinned specs:\n" - for pin_name, spec in pins.items(): - pin_message += f" - {spec} (labeled as '{pin_name}')\n" - return f"{message}\n\n{pin_message}" + for _, spec in out_state.pins.items(): + pin_message += f" - {spec}\n" + message += f"\n\n{pin_message}" return message def _maybe_raise_for_conda_build( @@ -813,7 +825,8 @@ def _maybe_raise_for_conda_build( # right away to let conda build handle it if not self._called_from_conda_build(): return - + if not conflicting_specs: + return from .conda_build_exceptions import ExplainedDependencyNeedsBuildingError # the patched index should contain the arch we are building this env for @@ -827,204 +840,19 @@ def _maybe_raise_for_conda_build( ) raise exc - def _export_solved_records( - self, - in_state: SolverInputState, - out_state: SolverOutputState, - index: LibMambaIndexHelper, - ): - if self.solver is None: - raise RuntimeError("Solver is not initialized. Call `._setup_solver()` first.") - - transaction = api.Transaction( - index._pool, - self.solver, - api.MultiPackageCache(context.pkgs_dirs), - ) - (names_to_add, names_to_remove), to_link, to_unlink = transaction.to_conda() - - for _, filename in to_unlink: - for name, record in in_state.installed.items(): - if record.is_unmanageable: - # ^ Do not try to unlink virtual pkgs, virtual eggs, etc - continue - if record.fn == filename: # match! 
- out_state.records.pop(name, None) - break - else: - log.warning("Tried to unlink %s but it is not installed or manageable?", filename) - - for_conda_build = self._called_from_conda_build() - for channel, filename, json_payload in to_link: - record = self._package_record_from_json_payload( - index, channel, filename, json_payload, for_conda_build=for_conda_build - ) - # We need this check below to make sure noarch package get reinstalled - # record metadata coming from libmamba is incomplete and won't pass the - # noarch checks -- to fix it, we swap the metadata-only record with its locally - # installed counterpart (richer in info) - already_installed_record = in_state.installed.get(record.name) - if ( - already_installed_record - and record.subdir == "noarch" - and already_installed_record.subdir == "noarch" - and record.version == already_installed_record.version - and record.build == already_installed_record.build - ): - # Replace repodata-only record with local-info-rich record counterpart - record = already_installed_record - - out_state.records[record.name] = record - - # Fixes conda-build tests/test_api_build.py::test_croot_with_spaces - if on_win and for_conda_build: - for record in out_state.records.values(): - if "%" not in str(record): - continue - if record.channel.location: # multichannels like 'defaults' have no location - record.channel.location = percent_decode(record.channel.location) - record.channel.name = percent_decode(record.channel.name) - - def _package_record_from_json_payload( - self, - index: LibMambaIndexHelper, - channel: str, - pkg_filename: str, - json_payload: str, - for_conda_build: bool = False, - ) -> PackageRecord: - """ - The libmamba transactions cannot return full-blown objects from the C/C++ side. - Instead, it returns the instructions to build one on the Python side: - - channel_info: dict - Channel datas, as built in .index.LibmambaIndexHelper._fetch_channel() - This is retrieved from the .index._index mapping, keyed by channel URLs - pkg_filename: str - The filename (.tar.bz2 or .conda) of the selected record. - json_payload: str - A str-encoded JSON payload with the PackageRecord kwargs. - """ - try: - kwargs = json.loads(json_payload) - except (TypeError, ValueError, json.JSONDecodeError) as exc: - channel_name = Channel(channel).canonical_name - msg = f"Could not parse JSON payload for {channel_name}::{pkg_filename}" - raise ParseError(msg) from exc - - # conda-lock will inject virtual packages, but these are not in the index - if pkg_filename.startswith("__") and "/@/" in channel: - return PackageRecord(**kwargs) - - try: - channel_info = index.get_info(channel) - except KeyError: - # this channel was never used to build the remote index, which - # can mean two things: it comes from pkgs_dirs (offline) - # or we obtained an already installed PackageRecord - # whose metadata contains a channel that doesn't exist - # in both cases, we can return the record from the correct object - if context.offline: - for path in context.pkgs_dirs: - pcd = PackageCacheData(path) - pcd.load() - record = next((r for r in pcd.values() if r.fn == pkg_filename), None) - if record: - return record - pd = PrefixData(self.prefix) - record = pd.get(kwargs["name"], default=None) - if record and record.fn == pkg_filename: - return record - # No luck? 
Cross our fingers and return the record from the JSON payload straight - return PackageRecord(**kwargs) - - # Otherwise, these are records from the index - kwargs["fn"] = pkg_filename - kwargs["channel"] = channel_info.channel - if for_conda_build: - # conda-build expects multichannel instances in the Dist->PackageRecord mapping - # see https://github.com/conda/conda-libmamba-solver/issues/363 - for multichannel_name, mc_channels in context.custom_multichannels.items(): - urls = [url for c in mc_channels for url in c.urls(with_credentials=False)] - if channel_info.noauth_url in urls: - kwargs["channel"] = multichannel_name - break - kwargs["url"] = join_url(channel_info.full_url, pkg_filename) - if not kwargs.get("subdir"): # missing in old channels - kwargs["subdir"] = channel_info.channel.subdir - if kwargs["subdir"] == "noarch": - # libmamba doesn't keep 'noarch' type around, so infer for now - if any(dep.split()[0] in ("python", "pypy") for dep in kwargs.get("depends", ())): - kwargs["noarch"] = "python" - else: - kwargs["noarch"] = "generic" - return PackageRecord(**kwargs) - - def _check_spec_compat(self, match_spec: Union[MatchSpec, None]) -> Union[MatchSpec, None]: - """ - Make sure we are not silently ingesting MatchSpec fields we are not - doing anything with! + # endregion - TODO: We currently allow `subdir` but we are not handling it right now. - """ - if match_spec is None: - return None - supported = "name", "version", "build", "channel", "subdir" - droppable = ("url", "md5", "sha256") - unsupported_but_set = [] - to_drop = set() - to_keep = {} - for field in match_spec.FIELD_NAMES: - value = match_spec.get_raw_value(field) - if value: - if ( - (field == "channel" and str(value) == "") - or (field == "subdir" and "channel" in to_drop) - or field in droppable - ): - # These make libmamba segfault but don't add useful info - to_drop.add(field) - elif field not in supported: - unsupported_but_set.append(field) - else: - to_keep[field] = value - if unsupported_but_set: - raise InvalidMatchSpec( - match_spec, - "Libmamba only supports a subset of the MatchSpec interface for now. " - f"You can only use {supported}, but you tried to use " - f"{tuple(unsupported_but_set)}.", - ) - if to_drop: - log.debug("Dropping unsupported fields from %s: %s", match_spec, sorted(to_drop)) - match_spec = MatchSpec(**to_keep) - if ( - match_spec.get_raw_value("channel") == "defaults" - and context.default_channels == DEFAULT_CHANNELS - ): - # !!! Temporary !!! - # Apply workaround for defaults::pkg-name specs. 
- # We need to replace it with the actual channel name (main, msys2, r) - # Instead of searching in the index, we apply a simple heuristic: - # - R packages are [_]r-*, mro-*, rpy or rstudio - # - Msys2 packages are m2-*, m2w64-*, or msys2-* - # - Everything else is in main - name = match_spec.name.lower() - if name in ("r", "rpy2", "rstudio") or name.startswith(("r-", "_r-", "mro-")): - channel = "pkgs/r" - elif name.startswith(("m2-", "m2w64-", "msys2-")): - channel = "pkgs/msys2" - else: - channel = "pkgs/main" - match_spec = MatchSpec(match_spec, channel=channel) - - return match_spec + # region General helpers + ######################## - def _reset(self): - self.solver = None - self._solver_options = None + def _log_info(self): + log.info("conda version: %s", _conda_version) + log.info("conda-libmamba-solver version: %s", __version__) + log.info("libmambapy version: %s", mamba_version()) + log.info("Target prefix: %r", self.prefix) + log.info("Command: %s", sys.argv) - def _called_from_conda_build(self): + def _called_from_conda_build(self) -> bool: """ conda build calls the solver via `conda.plan.install_actions`, which overrides Solver._index (populated in the classic solver, but empty for us) @@ -1044,11 +872,107 @@ def _called_from_conda_build(self): and {"install_actions", "get_install_actions"} <= {frame[3] for frame in stack()} ) + def _check_spec_compat(self, spec: MatchSpec | None) -> MatchSpec | None: + if spec is None: + return + spec_fields = {} + for field in spec.FIELD_NAMES: + value = spec.get_raw_value(field) + if value: + if field == "channel" and str(value) == "": + continue + spec_fields[field] = value + return MatchSpec(**spec_fields) + + def _conda_spec_to_libmamba_spec(self, spec: MatchSpec) -> LibmambaMatchSpec: + return LibmambaMatchSpec.parse(str(spec)) + + @staticmethod + def _fix_version_field_for_conda_build(spec: MatchSpec) -> MatchSpec: + """Fix taken from mambabuild""" + if spec.version: + only_dot_or_digit_re = re.compile(r"^[\d\.]+$") + version_str = str(spec.version) + if re.match(only_dot_or_digit_re, version_str): + spec_fields = spec.conda_build_form().split() + if version_str.count(".") <= 1: + spec_fields[1] = version_str + ".*" + else: + spec_fields[1] = version_str + "*" + return MatchSpec(" ".join(spec_fields)) + return spec + + @staticmethod + def _matchspec_from_error_str(spec: str | Sequence[str]) -> MatchSpec: + try: + if isinstance(spec, str): + name, version, build = spec.rsplit("-", 2) + return MatchSpec(name=name, version=version, build=build) + else: + kwargs = {"name": spec[0].rstrip(",")} + if len(spec) >= 2 and spec[1] != "=*": + if spec[1].startswith("==") or not spec[1].startswith("="): + kwargs["version"] = spec[1].rstrip(",") + else: + kwargs["version"] = spec[1][1:].rstrip(",") + ".*" + if len(spec) == 3 and spec[2] != "*": + kwargs["build"] = spec[2].rstrip(",") + return MatchSpec(**kwargs) + except Exception as exc: + raise ValueError(f"Could not parse spec: {spec}") from exc + + def _maybe_ignore_current_repodata(self) -> str: + is_repodata_fn_set = False + for config in context.collect_all().values(): + for key, value in config.items(): + if key == "repodata_fns" and value: + is_repodata_fn_set = True + break + if self._repodata_fn == "current_repodata.json" and not is_repodata_fn_set: + log.debug( + "Ignoring repodata_fn='current_repodata.json', defaulting to %s", + REPODATA_FN, + ) + return REPODATA_FN + return self._repodata_fn + + def _max_attempts(self, in_state: SolverInputState, default: int = 1) -> int: + 
from_env_var = os.environ.get("CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS")
+        installed_count = len(in_state.installed)
+        if from_env_var:
+            try:
+                max_attempts_from_env = int(from_env_var)
+            except ValueError:
+                raise CondaValueError(
+                    f"CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS='{from_env_var}'. Must be int."
+                )
+            if max_attempts_from_env < 1:
+                raise CondaValueError(
+                    f"CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS='{max_attempts_from_env}'. Must be >=1."
+                )
+            elif max_attempts_from_env > installed_count:
+                log.warning(
+                    "CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS='%s' is higher than the number of "
+                    "installed packages (%s). Using the latter instead.",
+                    max_attempts_from_env,
+                    installed_count,
+                )
+                return installed_count
+            else:
+                return max_attempts_from_env
+        elif in_state.update_modifier.FREEZE_INSTALLED and installed_count:
+            # this is the default, but can be overridden with --update-specs;
+            # we cap at MAX_SOLVER_ATTEMPTS_CAP attempts to avoid things
+            # getting too slow in large environments
+            return min(self.MAX_SOLVER_ATTEMPTS_CAP, installed_count)
+        else:
+            return default
+
     def _notify_conda_outdated(
         self,
-        link_precs,
-        index: LibMambaIndexHelper = None,
-        final_state: Iterable[PackageRecord] = None,
+        link_precs: Iterable[PackageRecord],
+        index: LibMambaIndexHelper | None = None,
+        final_state: Iterable[PackageRecord] | None = None,
     ):
         """
         We are overriding the base class implementation, which gets called in
@@ -1098,7 +1022,7 @@ def _notify_conda_outdated(
         # only check the loaded index if it contains the channel conda should come from
         # otherwise ignore
-        index_channels = {getattr(chn, "canonical_name", chn) for chn in index._channels}
+        index_channels = {getattr(chn, "canonical_name", chn) for chn in index.channels}
         if channel_name not in index_channels:
             return
@@ -1135,3 +1059,5 @@ def _notify_conda_outdated(
             ),
             file=sys.stderr,
         )
+
+    # endregion
diff --git a/conda_libmamba_solver/state.py b/conda_libmamba_solver/state.py
index 9bb1cb3a..aed7fc19 100644
--- a/conda_libmamba_solver/state.py
+++ b/conda_libmamba_solver/state.py
@@ -59,13 +59,13 @@ which allow to keep the reasons _why_ those specs or records were added to the
 mappings, as well as richer logging for each action.
 """
-
 # TODO: This module could be part of conda-core once if we refactor the classic logic
+from __future__ import annotations
+
 import logging
-from os import PathLike
 from types import MappingProxyType
-from typing import Dict, Iterable, Optional, Tuple, Type, Union
+from typing import TYPE_CHECKING
 
 from boltons.setutils import IndexedSet
 from conda.auxlib import NULL
@@ -80,24 +80,18 @@ from conda.models.channel import Channel
 from conda.models.match_spec import MatchSpec
 from conda.models.prefix_graph import PrefixGraph
-from conda.models.records import PackageRecord
-
-from .utils import EnumAsBools, compatible_specs
 
-log = logging.getLogger(f"conda.{__name__}")
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+    from os import PathLike
+    from typing import Any
 
+    from conda.core.solve import Solver
+    from conda.models.records import PackageRecord
 
-class IndexHelper:
-    """
-    The _index_ refers to the combination of all configured channels and their
-    platform-corresponding subdirectories. It provides the sources for available
-    packages that can become part of a prefix state, eventually.
-
-    Subclass this helper to add custom repodata fetching if needed. 
- """ +from .utils import EnumAsBools, compatible_specs - def explicit_pool(self, specs: Iterable[MatchSpec]) -> Iterable[str]: - raise NotImplementedError +log = logging.getLogger(f"conda.{__name__}") class SolverInputState: @@ -164,16 +158,16 @@ class SolverInputState: def __init__( self, - prefix: Union[str, bytes, PathLike], - requested: Optional[Iterable[Union[str, MatchSpec]]] = (), - update_modifier: Optional[UpdateModifier] = UpdateModifier.UPDATE_SPECS, - deps_modifier: Optional[DepsModifier] = DepsModifier.NOT_SET, - ignore_pinned: Optional[bool] = None, - force_remove: Optional[bool] = False, - force_reinstall: Optional[bool] = False, - prune: Optional[bool] = False, - command: Optional[str] = None, - _pip_interop_enabled: Optional[bool] = None, + prefix: str | bytes | PathLike, + requested: Iterable[str | MatchSpec] | None = (), + update_modifier: UpdateModifier | None = UpdateModifier.UPDATE_SPECS, + deps_modifier: DepsModifier | None = DepsModifier.NOT_SET, + ignore_pinned: bool | None = None, + force_remove: bool | None = False, + force_reinstall: bool | None = False, + prune: bool | None = False, + command: str | None = None, + _pip_interop_enabled: bool | None = None, ): self.prefix = prefix self._prefix_data = PrefixData(prefix, pip_interop_enabled=_pip_interop_enabled) @@ -208,7 +202,7 @@ def __init__( # special cases self._do_not_remove = {p: MatchSpec(p) for p in self._DO_NOT_REMOVE_NAMES} - def _default_to_context_if_null(self, name, value, context=context): + def _default_to_context_if_null(self, name, value, context=context) -> Any: "Obtain default value from the context if value is set to NULL; otherwise leave as is" return getattr(context, name) if value is NULL else self._ENUM_STR_MAP.get(value, value) @@ -223,7 +217,7 @@ def prefix_data(self) -> PrefixData: # Prefix state pools @property - def installed(self) -> Dict[str, PackageRecord]: + def installed(self) -> dict[str, PackageRecord]: """ This exposes the installed packages in the prefix. Note that a ``PackageRecord`` can generate an equivalent ``MatchSpec`` object with ``.to_match_spec()``. @@ -232,7 +226,7 @@ def installed(self) -> Dict[str, PackageRecord]: return MappingProxyType(dict(sorted(self.prefix_data._prefix_records.items()))) @property - def history(self) -> Dict[str, MatchSpec]: + def history(self) -> dict[str, MatchSpec]: """ These are the specs that the user explicitly asked for in previous operations on the prefix. See :class:`History` for more details. @@ -240,7 +234,7 @@ def history(self) -> Dict[str, MatchSpec]: return MappingProxyType(self._history) @property - def pinned(self) -> Dict[str, MatchSpec]: + def pinned(self) -> dict[str, MatchSpec]: """ These specs represent hard constrains on what package versions can be installed on the environment. The packages here returned don't need to be already installed. @@ -252,7 +246,7 @@ def pinned(self) -> Dict[str, MatchSpec]: return MappingProxyType(self._pinned) @property - def virtual(self) -> Dict[str, MatchSpec]: + def virtual(self) -> dict[str, MatchSpec]: """ System properties exposed as virtual packages (e.g. ``__glibc=2.17``). These packages cannot be (un)installed, they only represent constrains for other packages. By convention, @@ -261,7 +255,7 @@ def virtual(self) -> Dict[str, MatchSpec]: return MappingProxyType(dict(sorted(self._virtual.items()))) @property - def aggressive_updates(self) -> Dict[str, MatchSpec]: + def aggressive_updates(self) -> dict[str, MatchSpec]: """ Packages that the solver will always try to update. 
As such, they will never have an associated version or build constrain. Note that the packages here returned do not need to @@ -270,7 +264,7 @@ def aggressive_updates(self) -> Dict[str, MatchSpec]: return MappingProxyType(self._aggressive_updates) @property - def always_update(self) -> Dict[str, MatchSpec]: + def always_update(self) -> dict[str, MatchSpec]: """ Merged lists of packages that should always be updated, depending on the flags, including: - aggressive_updates @@ -290,7 +284,7 @@ def always_update(self) -> Dict[str, MatchSpec]: return MappingProxyType(pkgs) @property - def do_not_remove(self) -> Dict[str, MatchSpec]: + def do_not_remove(self) -> dict[str, MatchSpec]: """ Packages that are protected by the solver so they are not accidentally removed. This list is not configurable, but hardcoded for legacy reasons. @@ -298,7 +292,7 @@ def do_not_remove(self) -> Dict[str, MatchSpec]: return MappingProxyType(self._do_not_remove) @property - def requested(self) -> Dict[str, MatchSpec]: + def requested(self) -> dict[str, MatchSpec]: """ Packages that the user has explicitly asked for in this operation. """ @@ -397,25 +391,6 @@ def channels_from_specs(self) -> Iterable[Channel]: channel = Channel(spec.original_spec_str.split("::")[0]) yield channel - def channels_from_installed(self, seen=None) -> Iterable[Channel]: - seen_urls = set(seen or []) - # See https://github.com/conda/conda/issues/11790 - for record in self.installed.values(): - if record.channel.auth or record.channel.token: - # skip if the channel has authentication info, because - # it might cause issues with expired tokens and what not - continue - if record.channel.name in ("@", "", "pypi"): - # These "channels" are not really channels, more like - # metadata placeholders - continue - if record.channel.base_url is None: - continue - if record.channel.subdir_url in seen_urls: - continue - seen_urls.add(record.channel.subdir_url) - yield record.channel - def maybe_free_channel(self) -> Iterable[Channel]: if context.restore_free_channel: yield Channel.from_url("https://repo.anaconda.com/pkgs/free") @@ -447,7 +422,7 @@ class SolverOutputState: relaxation of the version and build constrains. If not provided, their default value is a blank mapping. pins - Packages that ended up being pinned. Mostly used for reporting and debugging. + Packages that ended up being pinned. Mostly used for reporting and debugging. Deprecated. 
Notes ----- @@ -476,21 +451,21 @@ def __init__( self, *, solver_input_state: SolverInputState, - records: Optional[Dict[str, PackageRecord]] = None, - for_history: Optional[Dict[str, MatchSpec]] = None, - neutered: Optional[Dict[str, MatchSpec]] = None, - conflicts: Optional[Dict[str, MatchSpec]] = None, - pins: Optional[Dict[str, MatchSpec]] = None, + records: dict[str, PackageRecord] | None = None, + for_history: dict[str, MatchSpec] | None = None, + neutered: dict[str, MatchSpec] | None = None, + conflicts: dict[str, MatchSpec] | None = None, + pins: dict[str, MatchSpec] | None = None, ): self.solver_input_state: SolverInputState = solver_input_state - self.records: Dict[str, PackageRecord] = records or dict(solver_input_state.installed) - self.for_history: Dict[str, MatchSpec] = for_history or dict(solver_input_state.requested) - self.neutered: Dict[str, MatchSpec] = neutered or {} - self.conflicts: Dict[str, MatchSpec] = conflicts or {} - self.pins: Dict[str, MatchSpec] = pins or {} + self.records: dict[str, PackageRecord] = records or dict(solver_input_state.installed) + self.for_history: dict[str, MatchSpec] = for_history or dict(solver_input_state.requested) + self.neutered: dict[str, MatchSpec] = neutered or {} + self.conflicts: dict[str, MatchSpec] = conflicts or {} + self.pins: dict[str, MatchSpec] = pins or {} @property - def current_solution(self): + def current_solution(self) -> IndexedSet[PackageRecord]: """ Massage currently stored records so they can be returned as the type expected by the solver API. This is what you should return in ``Solver.solve_final_state()``. @@ -498,7 +473,7 @@ def current_solution(self): return IndexedSet(PrefixGraph(self.records.values()).graph) @property - def specs(self): + def specs(self) -> dict[str, MatchSpec]: """ Merge all possible sources of input package specs, sorted by their input category and strictness. It's just meant to be an enumeration of all possible inputs, not a ready-to-use @@ -524,20 +499,20 @@ def specs(self): return specs_by_strictness @property - def real_specs(self): + def real_specs(self) -> dict[str, MatchSpec]: """ Specs that are _not_ virtual. """ return {name: spec for name, spec in self.specs.items() if not name.startswith("__")} @property - def virtual_specs(self): + def virtual_specs(self) -> dict[str, MatchSpec]: """ Specs that are virtual. """ return {name: spec for name, spec in self.specs.items() if name.startswith("__")} - def early_exit(self) -> Dict[str, PackageRecord]: + def early_exit(self) -> IndexedSet[PackageRecord]: """ Operations that do not need a solver and might result in returning early are collected here. @@ -614,7 +589,7 @@ def check_for_pin_conflicts(self, index): exc.allow_retry = False raise exc - def post_solve(self, solver: Type["Solver"]): + def post_solve(self, solver: type[Solver]): """ These tasks are performed _after_ the solver has done its work. It essentially post-processes the ``records`` mapping. 
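[Editor's note — not part of the diff: the `specs` property above merges inputs "sorted by their input category and strictness". For orientation, `MatchSpec.strictness` in conda scores a spec 1 (name only), 2 (name + version) or 3 (name + version + build). The sketch below is a standalone illustration of that ordering; the spec strings and the dict are made up, and the body of `sort_by_spec_strictness` (retyped in the next hunk) is not reproduced here.]

```python
from conda.models.match_spec import MatchSpec

specs = {
    "numpy": MatchSpec("numpy"),                  # strictness 1: name only
    "python": MatchSpec("python=3.9"),            # strictness 2: name + version
    "zlib": MatchSpec("zlib=1.2.13=h166bdaf_0"),  # strictness 3: name + version + build
}
# A (-strictness, name) key makes sorted() yield the strictest specs first,
# with the package name as a deterministic tie-breaker
ordered = sorted(specs.items(), key=lambda kv: (-kv[1].strictness, kv[0]))
assert [name for name, _ in ordered] == ["zlib", "python", "numpy"]
```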
@@ -791,7 +766,7 @@ def post_solve(self, solver: Type["Solver"]):
         self.records.update({record.name: record for record in graph.graph})


-def sort_by_spec_strictness(key_value_tuple: Tuple[str, MatchSpec]):
+def sort_by_spec_strictness(key_value_tuple: tuple[str, MatchSpec]) -> tuple[int, str]:
     """
     Helper function to sort a list of (key, value) tuples by spec strictness
     """
diff --git a/conda_libmamba_solver/utils.py b/conda_libmamba_solver/utils.py
index b1f7eb3a..6d9ae493 100644
--- a/conda_libmamba_solver/utils.py
+++ b/conda_libmamba_solver/utils.py
@@ -1,23 +1,32 @@
 # Copyright (C) 2022 Anaconda, Inc
 # Copyright (C) 2023 conda
 # SPDX-License-Identifier: BSD-3-Clause
-from enum import Enum
-from functools import lru_cache
+"""
+Miscellaneous utilities
+"""
+
+from __future__ import annotations
+
 from logging import getLogger
-from pathlib import Path
+from typing import TYPE_CHECKING
 from urllib.parse import quote

-from conda.base.context import context
 from conda.common.compat import on_win
-from conda.common.path import url_to_path
 from conda.common.url import urlparse
 from conda.exceptions import PackagesNotFoundError
-from conda.gateways.connection import session as gateway_session
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+    from enum import Enum
+
+    from conda.models.match_spec import MatchSpec
+
+    from .index import LibMambaIndexHelper

 log = getLogger(f"conda.{__name__}")


-def escape_channel_url(channel):
+def escape_channel_url(channel: str) -> str:
     if channel.startswith("file:"):
         if "%" in channel:  # it's escaped already
             return channel
@@ -40,26 +49,9 @@ def escape_channel_url(channel):
     return channel


-@lru_cache(maxsize=None)
-def is_channel_available(channel_url) -> bool:
-    if context.offline:
-        # We don't know where the channel might be (even file:// might be a network share)
-        # so we play it safe and assume it's not available
-        return False
-    try:
-        if channel_url.startswith("file://"):
-            return Path(url_to_path(channel_url)).is_dir()
-        if hasattr(gateway_session, "get_session"):
-            session = gateway_session.get_session(channel_url)
-        else:
-            session = gateway_session.CondaSession()
-        return session.head(f"{channel_url}/noarch/repodata.json").ok
-    except Exception as exc:
-        log.debug("Failed to check if channel %s is available", channel_url, exc_info=exc)
-        return False
-
-
-def compatible_specs(index, specs, raise_not_found=True):
+def compatible_specs(
+    index: LibMambaIndexHelper, specs: Iterable[MatchSpec], raise_not_found: bool = True
+):
     """
     Assess whether the given specs are compatible with each other. This is done by
     querying the index for each spec and taking the
@@ -78,7 +70,7 @@
         results = set(index.search(spec))
         if not results:
             if raise_not_found:
-                exc = PackagesNotFoundError([spec], index._channels)
+                exc = PackagesNotFoundError([spec], index.channels)
                 exc.allow_retry = False
                 raise exc
             return False
diff --git a/dev/requirements.txt b/dev/requirements.txt
index 771b697f..56dba1a9 100644
--- a/dev/requirements.txt
+++ b/dev/requirements.txt
@@ -3,7 +3,8 @@
 pip
 # run-time
 boltons>=23.0.0
 conda>=23.7.3
-libmamba>=1.5.6,<2.0.0dev0
-libmambapy>=1.5.6,<2.0.0dev0
-# be explicit about sqlite because sometimes it's removed from the env :shrug:
-sqlite
+conda-forge::libmamba>=2.0.0
+conda-forge::libmambapy>=2.0.0
+# NOTE: jaimergp/label/conda-libmamba-solver-for-libmamba-v2 is a temporary hack
+# to allow upgrades to libmamba v2. Remove once CLS with v2 compat is released.
+jaimergp/label/conda-libmamba-solver-for-libmamba-v2::conda-libmamba-solver
diff --git a/docs/user-guide/configuration.md b/docs/user-guide/configuration.md
index 2dfd99cf..dbe82ef6 100644
--- a/docs/user-guide/configuration.md
+++ b/docs/user-guide/configuration.md
@@ -10,4 +10,4 @@ Additionally, conda-libmamba-solver can be further configured via special enviro
 We do not recommend using these options in production environments. Their behavior might change in the future, or they can be entirely removed without prior notice.

 * `CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS`: Maximum number of attempts to find a solution. By default, this is set to the number of installed packages in the environment. In commands that involve a large number of changes in a large environment, it can take a bit to relax the constraints enough to find a solution. This option can be used to reduce the number of attempts and "give up" earlier.
-* `CONDA_LIBMAMBA_SOLVER_NO_CHANNELS_FROM_INSTALLED`: Do not inject the channels defined in the installed packages in the configured index.
+* `CONDA_LIBMAMBA_SOLVER_DEBUG_LIBSOLV`: Enable verbose logging from `libsolv`. Only has an effect if combined with `-vvv` in the CLI. Note that this will incur a big performance overhead. Only use when debugging solver issues.
diff --git a/docs/user-guide/libmamba-vs-classic.md b/docs/user-guide/libmamba-vs-classic.md
index a6e9656c..8e14190c 100644
--- a/docs/user-guide/libmamba-vs-classic.md
+++ b/docs/user-guide/libmamba-vs-classic.md
@@ -38,7 +38,7 @@ You can always use `--solver=classic` to re-enable the `classic` solver temporar

 With the release of `conda-libmamba-solver`, we took the opportunity to improve some aspects of the solver experience that were not possible to change in `classic` due to backwards compatibility restraints. The main ones are:

-* `conda-libmamba-solver` does not use `current_repodata.json` by default. Instead, it always uses the full `repodata.json` files.
+* `conda-libmamba-solver` does not use `current_repodata.json` by default. Instead, it always uses the full `repodata.json` files. You can still opt in by passing `--repodata-fn=current_repodata.json` explicitly.
 * `conda-libmamba-solver` does not retry with `--freeze-installed` by default. Instead, it has a tighter retry logic that progressively relaxes the constraints on the conflicting packages.
 * `conda-libmamba-solver` does not allow the user to override the configured [pinned specs](https://docs.conda.io/projects/conda/en/stable/user-guide/tasks/manage-pkgs.html#preventing-packages-from-updating-pinning) by specifying incompatible constraints in the CLI. Instead, it will error early. To override pinned specs, it needs to be done explicitly in the relevant configuration file(s) (e.g. temporarily commenting out the pin spec, or modifying the pin for a more recent version).
@@ -127,16 +127,16 @@ We ended up with an implementation a bit higher up in the abstraction tree:

   `--freeze-installed` attempts closer to the solver so we don't have to re-run
   the preparation steps.

-* `conda_libmamba_solver.solver.LibmambaSolver`:
+* `conda_libmamba_solver.solver.LibMambaSolver`:

   A `conda.core.solve.Solver` subclass that completely replaces the
   `Solver.solve_final_state()` method. We used this opportunity to refactor
   some of the pre-solver logic (spread across different layers in `classic`)
   into a solver-agnostic module (`conda_libmamba_solver.state`) with
   nicer-to-work-with helper objects. Our subclass instantiates the `libmamba`
   objects.
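[Editor's note — a minimal sketch, not part of the PR, of how this subclass is driven through the standard `conda.core.solve.Solver` API; the prefix, channels and spec below are placeholders:]

```python
from conda_libmamba_solver.solver import LibMambaSolver

# Constructor signature comes from conda.core.solve.Solver:
# (prefix, channels, subdirs, specs_to_add=..., specs_to_remove=...)
solver = LibMambaSolver(
    "/path/to/env",             # placeholder prefix
    ("conda-forge",),           # channels to index
    ("linux-64", "noarch"),     # subdirs to load
    specs_to_add=("numpy",),    # placeholder request
)
# solve_final_state() is the method this subclass reimplements; it returns
# the full set of PackageRecords the target environment should contain
records = solver.solve_final_state()
```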
-* `libmamba.api.Solver`:
+* `libmambapy.Solver`:

-  The `libmamba.api` Python module is generated by `pybind11` bindings to the underlying `libmamba` C++ library.
-  Some of the objects we rely on are `api.Solver` (interfaces with `libsolv`), `api.Context` (reimplementation of `conda.base.context`) and the `api.{Pool,Repo}` stack (handles the channel metadata and target environment state).
+  The `libmambapy` Python package is generated by `pybind11` bindings to the underlying `libmamba` C++ library.
+  Some of the objects we rely on are `Solver` (interfaces with `libsolv`) and the `Database` object (handles the channel metadata and target environment state).

 * `libsolv`:

@@ -210,10 +210,9 @@
 We don't need a separate attempt to disable `--freeze-installed` because our
 retry logic handles conflicts and frozen packages in the same way.
 Additionally, this retry logic can also be disabled or reduced with an
-environment variable for extreme cases (very large environments). We also
-ignore `current_repodata.json` altogether. All of these changes make the
-overall logic simpler and faster, which compounds on top of the
-lightning-fast `libmamba` implementation.
+environment variable for extreme cases (very large environments).
+This makes the overall logic simpler and faster, which compounds
+on top of the lightning-fast `libmamba` implementation.

 #### SAT algorithms

@@ -265,7 +264,7 @@ version), express that explicitly: `conda install scipy=1.0` instead of

 `classic` prunes the channel metadata (internally referred to as the "index")
 in every `Resolve.solve()` call. This reduces the search space by excluding
 packages that won't ever be needed by the current set of input constraints.
-Conversely, this performance optimization step can longer and longer the larger the index gets.
+Conversely, this performance optimization step can get longer and longer the larger the index gets.
 In `libsolv`, pruning is part of the filtering, sorting and selection
 mechanism that informs the solver (see [`policy.c`][policy.c] and [`selection.c`][selection.c]). It runs in C, using memory-efficient data structures.
diff --git a/docs/user-guide/performance.md b/docs/user-guide/performance.md
index b5658ff0..03d4bffb 100644
--- a/docs/user-guide/performance.md
+++ b/docs/user-guide/performance.md
@@ -17,7 +17,7 @@ These tips apply to both solvers:

 ## For `conda-libmamba-solver`

 * Experimental: `CONDA_LIBMAMBA_SOLVER_MAX_ATTEMPTS=1`.
-  Setting this environment variable will disable the retry loop, making it behave more like `mamba`.
+  Setting this environment variable will disable the retry loop, making it behave more like `micromamba`.

 ## For conda `classic`
diff --git a/news/457-libmamba-v2 b/news/457-libmamba-v2
new file mode 100644
index 00000000..ace707a4
--- /dev/null
+++ b/news/457-libmamba-v2
@@ -0,0 +1,22 @@
+### Enhancements
+
+* Require `libmambapy` v2. This is a big refactor in `libmamba` internals, which allowed us to remove a lot of code in `conda-libmamba-solver` too. (#457)
+
+### Bug fixes
+
+*
+
+### Deprecations
+
+* `CONDA_LIBMAMBA_SOLVER_NO_CHANNELS_FROM_INSTALLED` has no effect anymore. Channels coming from installed packages are no longer added to the channel list. (#411 via #457)
+* Removed `conda_libmamba_solver.state.BaseIndexHelper`. The base class is now `conda_libmamba_solver.index.IndexHelper`. (#457)
+* Verbose logging in `libsolv` has a big overhead in `libmamba` v2, so we have disabled it by default (even if the user adds `-vvv` flags to the CLI).
To opt in, please set `CONDA_LIBMAMBA_SOLVER_DEBUG_LIBSOLV` to a truthy value. (#457)
+* Python 3.8 is no longer supported. The minimum version is now 3.9. (#457)
+
+### Docs
+
+*
+
+### Other
+
+*
diff --git a/pyproject.toml b/pyproject.toml
index 0b05ca47..48eb24b2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,17 +14,17 @@ classifiers = [
   "License :: OSI Approved :: BSD License",
   "Programming Language :: Python :: 3",
   "Programming Language :: Python :: 3 :: Only",
-  "Programming Language :: Python :: 3.7",
-  "Programming Language :: Python :: 3.8",
   "Programming Language :: Python :: 3.9",
   "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
   "Programming Language :: Python :: Implementation :: CPython",
   "Programming Language :: Python :: Implementation :: PyPy"
 ]
-requires-python = ">=3.8"
+requires-python = ">=3.9"
 dependencies = [
   "conda >=23.7.3",
-  "libmambapy >=1.5.6,<2.0.0dev0",
+  "libmambapy >=2",
   "boltons >=23.0.0",
 ]
 dynamic = [
@@ -42,7 +42,7 @@ source = "vcs"

 [tool.black]
 line-length = 99
-target-version = ['py37', 'py38', 'py39', 'py310']
+target-version = ['py39', 'py310', 'py311', 'py312']
 exclude = '''
 (
       ^/conda_libmamba_solver/_libmamba\.py
@@ -63,9 +63,39 @@ markers = [
   "slow: slow running tests",
 ]

-[tool.isort]
-profile = "black"
-filter_files = true
-
 [tool.ruff]
+exclude = ["conda_libmamba_solver/mamba_utils.py", "tests/data/"]
+target-version = "py39"
 line-length = 99
+
+[tool.ruff.lint]
+# E, W = pycodestyle errors and warnings
+# F = pyflakes
+# I = isort
+# D = pydocstyle
+# UP = pyupgrade
+# ISC = flake8-implicit-str-concat
+# TCH = flake8-type-checking
+# T10 = flake8-debugger
+# FA = flake8-future-annotations
+# see also https://docs.astral.sh/ruff/rules/
+select = ["E", "W", "F", "I", "D1", "UP", "ISC", "TCH", "T10", "FA"]
+# E402 module level import not at top of file
+# E501 line too long
+# E722 do not use bare 'except'
+# E731 do not assign a lambda expression, use a def
+# D101 Missing docstring in public class
+# D102 Missing docstring in public method
+# D103 Missing docstring in public function
+# D104 Missing docstring in public package
+# D105 Missing docstring in magic method
+# D107 Missing docstring in `__init__`
+ignore = ["E402", "E501", "E722", "E731", "D101", "D102", "D103", "D104", "D105", "D107"]
+extend-per-file-ignores = {"docs/*" = ["D1"], "tests/*" = ["D1"]}
+pydocstyle = {convention = "pep257"}
+flake8-type-checking = {exempt-modules = [], strict = true}
+
+# [tool.pyright]
+# include = ["conda_libmamba_solver"]
+# ignore = ["conda_libmamba_solver/*_v1.py"]
+# strict = ["**/"]
diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index 8e414018..2a84ac79 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -11,25 +11,25 @@ source:

 build:
   noarch: python
   number: 0
-  script: "{{ PYTHON }} -m pip install src/ -vv"
+  script: "{{ PYTHON }} -m pip install src/ -vv --no-deps --no-build-isolation"

 requirements:
   host:
-    - python >=3.8
+    - python >=3.9
     - pip
     - hatchling
     - hatch-vcs
   run:
-    - python >=3.8
+    - python >=3.9
     - conda >=23.7.3
-    - libmambapy >=1.5.6,<2.0.0dev0
+    - libmambapy >=2.0.0
     - boltons >=23.0.0

 test:
   imports:
     - conda_libmamba_solver
   commands:
-    - CONDA_SOLVER=libmamba conda create -n test --dry-run scipy
+    - conda create -n test --dry-run scipy --solver=libmamba
     - >-
       python -c
       "import conda_libmamba_solver as cls;
diff --git a/tests/channel_testing/helpers.py b/tests/channel_testing/helpers.py
index 549e05f7..1620a9f0 100644
---
a/tests/channel_testing/helpers.py +++ b/tests/channel_testing/helpers.py @@ -2,12 +2,13 @@ # Copyright (C) 2022 Anaconda, Inc # Copyright (C) 2023 conda # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + import os import pathlib import socket import subprocess import sys -from typing import Tuple import pytest from conda.testing.integration import _get_temp_prefix, run_command @@ -60,6 +61,7 @@ def startup_check(self): return not error logfile = xprocess.ensure(name, Starter) + print("Logfile at", logfile) if user and password: yield f"http://{user}:{password}@localhost:{port}" @@ -154,7 +156,7 @@ def create_with_channel( ) -def create_with_channel_in_process(channel, solver="libmamba", **kwargs) -> Tuple[str, str, int]: +def create_with_channel_in_process(channel, solver="libmamba", **kwargs) -> tuple[str, str, int]: stdout, stderr, returncode = run_command( "create", _get_temp_prefix(), diff --git a/tests/channel_testing/reposerver.py b/tests/channel_testing/reposerver.py index b058d47e..5d73c31d 100644 --- a/tests/channel_testing/reposerver.py +++ b/tests/channel_testing/reposerver.py @@ -11,6 +11,7 @@ See data/mamba_repo/LICENSE for full details """ + import argparse import base64 import glob @@ -260,6 +261,7 @@ def do_GET(self) -> None: class PackagesHeadersHandler(RepodataHeadersHandler): "Same as RepodataHeadersHandler, but it fails when tarballs are requested" + path_suffix_to_debug = ".tar.bz2" diff --git a/tests/repodata_time_machine.py b/tests/repodata_time_machine.py index c050ab36..70d628db 100644 --- a/tests/repodata_time_machine.py +++ b/tests/repodata_time_machine.py @@ -11,6 +11,7 @@ - Apply the patches - Generate a ready-to-use local channel """ + import bz2 import json import os diff --git a/tests/test_channels.py b/tests/test_channels.py index 0352a826..906af561 100644 --- a/tests/test_channels.py +++ b/tests/test_channels.py @@ -22,11 +22,13 @@ ) from conda.testing.integration import run_command as conda_inprocess -from .channel_testing.helpers import http_server_auth_basic # noqa: F401 -from .channel_testing.helpers import http_server_auth_basic_email # noqa: F401 -from .channel_testing.helpers import http_server_auth_none # noqa: F401 -from .channel_testing.helpers import http_server_auth_token # noqa: F401 -from .channel_testing.helpers import create_with_channel +from .channel_testing.helpers import ( + create_with_channel, + http_server_auth_basic, # noqa: F401 + http_server_auth_basic_email, # noqa: F401 + http_server_auth_none, # noqa: F401 + http_server_auth_token, # noqa: F401 +) from .utils import conda_subprocess, write_env_config DATA = Path(__file__).parent / "data" @@ -125,7 +127,7 @@ def _setup_channels_custom(prefix, force=False): write_env_config( prefix, force=force, - channels=["conda-forge", "defaults"], + channels=["conda-forge"], custom_channels={ "conda-forge": "https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud", }, @@ -139,7 +141,7 @@ def _setup_channels_custom(prefix, force=False): _setup_channels_custom, ), ) -def test_mirrors_do_not_leak_channels(config_env): +def test_mirrors_do_not_leak_channels(config_env, tmp_path, tmp_env): """ https://github.com/conda/conda-libmamba-solver/issues/108 @@ -151,7 +153,7 @@ def test_mirrors_do_not_leak_channels(config_env): is undesirable. 
""" - with env_vars({"CONDA_PKGS_DIRS": _get_temp_prefix()}), make_temp_env() as prefix: + with env_vars({"CONDA_PKGS_DIRS": tmp_path}), tmp_env() as prefix: assert (Path(prefix) / "conda-meta" / "history").exists() # Setup conda configuration @@ -159,10 +161,10 @@ def test_mirrors_do_not_leak_channels(config_env): common = ["-yp", prefix, "--solver=libmamba", "--json", "-vv"] env = os.environ.copy() - env["CONDA_PREFIX"] = prefix # fake activation so config is loaded + env["CONDA_PREFIX"] = str(prefix) # fake activation so config is loaded # Create an environment using mirrored channels only - p = conda_subprocess("install", *common, "python", "pip", env=env) + p = conda_subprocess("install", *common, "ca-certificates", env=env) result = json.loads(p.stdout) if p.stderr: assert "conda.anaconda.org" not in p.stderr @@ -175,7 +177,7 @@ def test_mirrors_do_not_leak_channels(config_env): ), pkg # Make a change to that channel - p = conda_subprocess("install", *common, "pytest", env=env) + p = conda_subprocess("install", *common, "zlib", env=env) # Ensure that the loaded channels are ONLY the mirrored ones result = json.loads(p.stdout) @@ -264,23 +266,23 @@ def test_conda_build_with_aliased_channels(tmp_path): condarc.unlink() -def test_http_server_auth_none(http_server_auth_none): +def test_http_server_auth_none(http_server_auth_none): # noqa: F811 create_with_channel(http_server_auth_none) -def test_http_server_auth_basic(http_server_auth_basic): +def test_http_server_auth_basic(http_server_auth_basic): # noqa: F811 create_with_channel(http_server_auth_basic) -def test_http_server_auth_basic_email(http_server_auth_basic_email): +def test_http_server_auth_basic_email(http_server_auth_basic_email): # noqa: F811 create_with_channel(http_server_auth_basic_email) -def test_http_server_auth_token(http_server_auth_token): +def test_http_server_auth_token(http_server_auth_token): # noqa: F811 create_with_channel(http_server_auth_token) -def test_http_server_auth_token_in_defaults(http_server_auth_token): +def test_http_server_auth_token_in_defaults(http_server_auth_token): # noqa: F811 condarc = Path.home() / ".condarc" condarc_contents = condarc.read_text() if condarc.is_file() else None try: diff --git a/tests/test_experimental.py b/tests/test_experimental.py index d82f4f16..5f27f2b0 100644 --- a/tests/test_experimental.py +++ b/tests/test_experimental.py @@ -4,6 +4,7 @@ """ Ensure experimental features work accordingly. 
""" + import os import sys from subprocess import run diff --git a/tests/test_index.py b/tests/test_index.py new file mode 100644 index 00000000..e7bfe5b8 --- /dev/null +++ b/tests/test_index.py @@ -0,0 +1,111 @@ +# Copyright (C) 2022 Anaconda, Inc +# Copyright (C) 2023 conda +# SPDX-License-Identifier: BSD-3-Clause +import json +import os +import shutil +import time +from pathlib import Path + +import pytest +from conda.base.context import reset_context +from conda.common.compat import on_win +from conda.core.subdir_data import SubdirData +from conda.gateways.logging import initialize_logging +from conda.models.channel import Channel + +from conda_libmamba_solver.index import LibMambaIndexHelper + +initialize_logging() +DATA = Path(__file__).parent / "data" + + +def test_given_channels(monkeypatch: pytest.MonkeyPatch, tmp_path: os.PathLike): + monkeypatch.setenv("CONDA_PKGS_DIRS", str(tmp_path)) + reset_context() + libmamba_index = LibMambaIndexHelper.from_platform_aware_channel( + channel=Channel("conda-test/noarch") + ) + assert libmamba_index.db.repo_count() == 1 + + conda_index = SubdirData(Channel("conda-test/noarch")) + conda_index.load() + + assert libmamba_index.db.package_count() == len(tuple(conda_index.iter_records())) + + +@pytest.mark.parametrize( + "only_tar_bz2", + ( + pytest.param("1", id="CONDA_USE_ONLY_TAR_BZ2=true"), + pytest.param("", id="CONDA_USE_ONLY_TAR_BZ2=false"), + ), +) +def test_defaults_use_only_tar_bz2(monkeypatch: pytest.MonkeyPatch, only_tar_bz2: bool): + """ + Defaults is particular in the sense that it offers both .tar.bz2 and .conda for LOTS + of packages. SubdirData ignores .tar.bz2 entries if they have a .conda counterpart. + So if we count all the packages in each implementation, libmamba's has way more. + To remain accurate, we test this with `use_only_tar_bz2`: + - When true, we only count .tar.bz2 + - When false, we only count .conda + """ + monkeypatch.setenv("CONDA_USE_ONLY_TAR_BZ2", only_tar_bz2) + reset_context() + libmamba_index = LibMambaIndexHelper( + channels=[Channel("defaults")], + subdirs=("noarch",), + installed_records=(), # do not load installed + pkgs_dirs=(), # do not load local cache as a channel + ) + n_repos = 3 if on_win else 2 + assert len(libmamba_index.repos) == n_repos + + libmamba_dot_conda_total = libmamba_index.n_packages( + filter_=lambda pkg: pkg.package_url.endswith(".conda") + ) + libmamba_tar_bz2_total = libmamba_index.n_packages( + filter_=lambda pkg: pkg.package_url.endswith(".tar.bz2") + ) + + conda_dot_conda_total = 0 + conda_tar_bz2_total = 0 + for channel_url in Channel("defaults/noarch").urls(subdirs=("noarch",)): + conda_index = SubdirData(Channel(channel_url)) + conda_index.load() + for pkg in conda_index.iter_records(): + if pkg["url"].endswith(".conda"): + conda_dot_conda_total += 1 + elif pkg["url"].endswith(".tar.bz2"): + conda_tar_bz2_total += 1 + else: + raise RuntimeError(f"Unrecognized package URL: {pkg['url']}") + + if only_tar_bz2: + assert conda_tar_bz2_total == libmamba_tar_bz2_total + assert libmamba_dot_conda_total == conda_dot_conda_total == 0 + else: + assert conda_dot_conda_total == libmamba_dot_conda_total + assert conda_tar_bz2_total == libmamba_tar_bz2_total + + +def test_reload_channels(tmp_path: Path): + (tmp_path / "noarch").mkdir(parents=True, exist_ok=True) + shutil.copy(DATA / "mamba_repo" / "noarch" / "repodata.json", tmp_path / "noarch") + initial_repodata = (tmp_path / "noarch" / "repodata.json").read_text() + index = LibMambaIndexHelper(channels=[Channel(str(tmp_path))]) + 
initial_count = index.n_packages() + SubdirData._cache_.clear() + + data = json.loads(initial_repodata) + package = data["packages"]["test-package-0.1-0.tar.bz2"] + data["packages"]["test-package-copy-0.1-0.tar.bz2"] = {**package, "name": "test-package-copy"} + modified_repodata = json.dumps(data) + (tmp_path / "noarch" / "repodata.json").write_text(modified_repodata) + + assert initial_repodata != modified_repodata + # TODO: Remove this sleep after addressing + # https://github.com/conda/conda/issues/13783 + time.sleep(1) + index.reload_channel(Channel(str(tmp_path))) + assert index.n_packages() == initial_count + 1 diff --git a/tests/test_performance.py b/tests/test_performance.py index eb2a10cf..152fe6da 100644 --- a/tests/test_performance.py +++ b/tests/test_performance.py @@ -4,6 +4,7 @@ """ Measure the speed and memory usage of the different backend solvers """ + import os import shutil @@ -44,7 +45,10 @@ def _tmp_prefix_safe(): return _get_temp_prefix(use_restricted_unicode=True).replace(" ", "") -@pytest.fixture(scope="module", params=os.listdir(TEST_DATA_DIR)) +@pytest.fixture( + scope="module", + params=[f for f in os.listdir(TEST_DATA_DIR) if f.endswith(".lock")], +) def prefix_and_channels(request): lockfile = os.path.join(TEST_DATA_DIR, request.param) lock_platform = lockfile.split(".")[-2] diff --git a/tests/test_repoquery.py b/tests/test_repoquery.py index 8c6e4b13..ed1c0896 100644 --- a/tests/test_repoquery.py +++ b/tests/test_repoquery.py @@ -3,6 +3,8 @@ # SPDX-License-Identifier: BSD-3-Clause import json +from conda.models.channel import Channel + from conda_libmamba_solver.index import LibMambaIndexHelper from .utils import conda_subprocess @@ -23,7 +25,7 @@ def test_repoquery(): def test_query_search(): - index = LibMambaIndexHelper(channels=["conda-forge"]) + index = LibMambaIndexHelper(channels=[Channel("conda-forge")]) for query in ( "ca-certificates", "ca-certificates =2022.9.24", diff --git a/tests/test_solvers.py b/tests/test_solver.py similarity index 94% rename from tests/test_solvers.py rename to tests/test_solver.py index e6e16072..2e90494a 100644 --- a/tests/test_solvers.py +++ b/tests/test_solver.py @@ -22,42 +22,12 @@ package_is_installed, run_command, ) -from conda.testing.solver_helpers import SolverTests -from conda_libmamba_solver import LibMambaSolver from conda_libmamba_solver.exceptions import LibMambaUnsatisfiableError from .utils import conda_subprocess -class TestLibMambaSolver(SolverTests): - @property - def solver_class(self): - return LibMambaSolver - - @property - def tests_to_skip(self): - return { - "LibMambaSolver does not support track-features/features": [ - "test_iopro_mkl", - "test_iopro_nomkl", - "test_mkl", - "test_accelerate", - "test_scipy_mkl", - "test_pseudo_boolean", - "test_no_features", - "test_surplus_features_1", - "test_surplus_features_2", - # this one below only fails reliably on windows; - # it passes Linux on CI, but not locally? 
- "test_unintentional_feature_downgrade", - ], - "LibMambaSolver installs numpy with mkl while we were expecting no-mkl numpy": [ - "test_remove", - ], - } - - def test_python_downgrade_reinstalls_noarch_packages(): """ Reported in https://github.com/conda/conda/issues/11346 @@ -435,6 +405,9 @@ def test_ca_certificates_pins(): raise AssertionError("ca-certificates not found in LINK actions") +@pytest.mark.skipif( + context.subdir == "osx-arm64", reason="python=2.7 not available in this platform" +) def test_python_update_should_not_uninstall_history(): """ https://github.com/conda/conda-libmamba-solver/issues/341 @@ -491,12 +464,12 @@ def test_python_downgrade_with_pins_removes_truststore(): "--json", "python=3.9", env=env, + check=False, ) - assert p.returncode == 0 data = json.loads(p.stdout) + assert p.returncode == 0 assert data.get("success") assert data.get("dry_run") - assertions = 0 link_dict = {pkg["name"]: pkg for pkg in data["actions"]["LINK"]} unlink_dict = {pkg["name"]: pkg for pkg in data["actions"]["UNLINK"]} assert link_dict["python"]["version"].startswith("3.9.") diff --git a/tests/test_solver_differences.py b/tests/test_solver_differences.py index 86805357..1779a152 100644 --- a/tests/test_solver_differences.py +++ b/tests/test_solver_differences.py @@ -5,6 +5,7 @@ This module collects examples of environments that were hard to solve, required workarounds or didn't meet users' expectations... specially if compared to conda classic. """ + import json import os diff --git a/tests/test_workarounds.py b/tests/test_workarounds.py index df4c0697..032991f9 100644 --- a/tests/test_workarounds.py +++ b/tests/test_workarounds.py @@ -66,6 +66,7 @@ def test_build_string_filters(): @pytest.mark.parametrize("stage", ["Collecting package metadata", "Solving environment"]) def test_ctrl_c(stage): + TIMEOUT = 10 # Used twice in total, so account for double the amount p = sp.Popen( [ sys.executable, @@ -87,7 +88,7 @@ def test_ctrl_c(stage): t0 = time.time() while stage not in p.stdout.readline(): time.sleep(0.1) - if time.time() - t0 > 30: + if time.time() - t0 > TIMEOUT: raise RuntimeError("Timeout") # works around Windows' awkward CTRL-C signal handling @@ -99,13 +100,12 @@ def test_ctrl_c(stage): kernel.AttachConsole(p.pid) kernel.SetConsoleCtrlHandler(None, 1) kernel.GenerateConsoleCtrlEvent(0, 0) - p.wait(timeout=30) - assert p.returncode != 0 - assert "KeyboardInterrupt" in p.stdout.read() + p.stderr.read() + p.wait(timeout=TIMEOUT) finally: kernel.SetConsoleCtrlHandler(None, 0) else: p.send_signal(signal.SIGINT) - p.wait(timeout=30) - assert p.returncode != 0 - assert "KeyboardInterrupt" in p.stdout.read() + p.stderr.read() + p.wait(timeout=TIMEOUT) + + assert p.returncode != 0 + assert "KeyboardInterrupt" in p.stdout.read() + p.stderr.read()