diff --git a/AGENTS.md b/AGENTS.md index 6f90a2bb..37f14e41 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -24,6 +24,7 @@ ## Style and Documentation - All code comments, README entries, and changelog notes **must be written in English**. - Keep imports tidy—remove unused symbols and respect the Ruff isort grouping so the Home Assistant package stays first-party under `custom_components/pollenlevels`. +- Translation source of truth is `custom_components/pollenlevels/translations/en.json`. Keep all other locale files in sync with it and do not add or rely on a `strings.json` file. Note: When Home Assistant raises its Python floor to 3.14, this guidance will be updated; until then, treat Python 3.13 as the compatibility target for integration code. diff --git a/CHANGELOG.md b/CHANGELOG.md index 26c333b3..8df88078 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,213 @@ -# Changelog +## [1.9.3] - 2026-02-14 +### Fixed +- Aligned config-flow API validation with runtime parsing by requiring `dailyInfo` + to be a non-empty list of objects during setup validation. +- Prevented test cross-contamination in setup tests by using scoped monkeypatching + for coordinator/client stubs instead of persistent module reassignment. +- Prevented disabled per-day sensors from being re-created during sensor setup by + skipping `*_d1`/`*_d2` keys when effective forecast options disable them. +- Hardened coordinator parsing for malformed `dailyInfo` payloads by treating + non-list/non-dict structures as invalid and preserving the last successful + dataset when available. +- Normalized stored forecast sensor mode values during integration setup so + legacy or whitespace-padded values no longer degrade silently to `none`. +- Ensured deterministic current-day plant sensor creation by sorting plant codes. +- Reject whitespace-only API keys at setup (defensive validation) and raise `ConfigEntryAuthFailed` with a clearer "Invalid API key" message. 
+- Masked API key input fields in config flow (password selector).
+- Cleared entry runtime data when platform forwarding fails to avoid leaving a partially initialized state.
+- Hardened `forecast_days` parsing during coordinator and sensor setup to tolerate malformed stored values without crashing.
+- Improved test isolation by avoiding unconditional replacement of the global `aiohttp` module stub.
+- Accepted numeric-string RGB channels from API color payloads by relying on shared
+  channel normalization, while still ignoring non-numeric strings.
+- Hardened HTTP 429 backoff by validating `Retry-After` values (rejecting non-finite,
+  negative, and stale date-based delays) and clamping retry sleep to a safe bounded
+  range.
+
+### Changed
+- Switched sensor setup iteration to use a validated local data snapshot for
+  clearer and more consistent entity creation flow.
+- Preserved legacy 4-decimal coordinate unique-id formatting to keep existing
+  duplicate-location detection behavior stable across upgrades.
+- Expanded regression coverage for disabled per-day sensor creation, malformed
+  `dailyInfo` handling, setup mode normalization, and legacy duplicate
+  detection behavior for coordinate-based unique IDs.
+- Simplified plant parsing by removing redundant code checks (non-empty by construction).
+- Deduplicated defensive integer parsing into a shared utility and aligned diagnostics
+  with runtime/config-flow rules to reject non-finite or decimal values consistently.
+- Clarified the per-day TYPE sensor range option text (D+2 creates both D+1 and D+2 sensors) across translations.
+
+## [1.9.2] - 2026-02-13
+### Fixed
+- Re-raised `asyncio.CancelledError` during coordinator updates to avoid wrapping
+  shutdown/reload cancellations as `UpdateFailed`.
+- Validated config-entry coordinates as finite and in-range values before setup
+  to avoid malformed requests and retry with `ConfigEntryNotReady` when invalid.
+ +### Changed +- Reduced coordinator parsing overhead by building per-day type/plant lookup maps + in a single pass and reusing them across forecast extraction. +- Reused cached day-0 plant maps for current-day plant sensors and sorted type-code + processing for deterministic sensor key ordering. +- Normalized plant-code matching across days to keep plant forecast attributes + populated even when API casing differs between days. +- Clamped diagnostics `request_params_example.days` to supported forecast ranges + and handled non-finite values defensively. +- Hardened numeric parsing guards for config/options inputs and color channels + to safely reject non-finite values without raising runtime errors. +- Dropped non-finite rounded coordinates in diagnostics request examples to keep + support payloads consistent and safe. +- Kept plant `code` attributes in their day-0 API form while using normalized + keys internally for cross-day forecast matching stability. +- Enforced integer-only parsing for numeric config/options fields to reject + decimal inputs consistently. +- Strengthened diagnostics tests with active redaction behavior checks for secret + fields in support payloads. +- Aligned runtime integer parsing with config-flow rules (reject non-integer + numeric values) and reduced coordinator forecast overhead by reusing offset + maps and plant-key lists during attribute enrichment. + +## [1.9.1] - 2026-01-10 +### Fixed +- Preserved the last successful coordinator data when the API response omits + `dailyInfo`, avoiding empty entities after transient API glitches. +- Redacted API keys from config flow error placeholders derived from API responses + to prevent secrets from appearing in setup errors. +- Cleared stale setup error placeholders when per-day sensor options are + incompatible with the selected forecast days. 
+- Clarified diagnostics redaction flow: `async_redact_data` is a synchronous + helper in HA Core (despite the name), ensuring diagnostics return the final + redacted payload. +- Trimmed diagnostics payloads to avoid listing all data keys and to hide precise + coordinates while keeping rounded location context for support. +- Updated the `pollenlevels.force_update` service to request a coordinator + refresh and wait for completion before returning. +- Sanitized `pollenlevels.force_update` failure logging to avoid exposing raw + exception details in warnings. +- Handled cancelled `force_update` refresh results explicitly to keep service + logging and control flow consistent during shutdown/reload paths. +- Re-raised coordinator `CancelledError` during updates so shutdown/reload + cancellations are not wrapped as `UpdateFailed`. + +### Changed +- **Breaking change:** removed the `color_raw` attribute from pollen sensors to + reduce state size; use `color_hex` or `color_rgb` instead. +- Refactored config flow HTTP validation to reduce duplicated error handling + logic without changing behavior. +- Updated README attributes list to remove `color_raw` now that it is no longer + exposed by sensors. +- Removed unused internal `color_raw` coordinator payload fields to reduce + update payload size while keeping `color_hex`/`color_rgb` behavior unchanged. +- Clarified diagnostics coordinator access when `runtime_data` is missing, + without changing diagnostics output. + +## [1.9.0-rc1] - 2025-12-31 +### Changed +- Moved runtime state to config entry `runtime_data` with a shared + `GooglePollenApiClient` per entry while keeping existing sensor behaviour and + identifiers unchanged. +- Updated sensors, diagnostics, and the `pollenlevels.force_update` service to + read coordinators from runtime data so each entry reuses a single API client + for Google Pollen requests. 
+- Treated HTTP 401 responses like 403 to surface `invalid_auth` during setup + validation and runtime calls instead of generic connection errors. +- Restored API key validation during setup to raise `ConfigEntryAuthFailed` + when the key is missing instead of retrying endlessly. +- Centralized config entry title normalization during setup so the cleaned + device titles are reused across all sensors. +- Simplified metadata sensors by relying on inherited `unique_id` and + `device_info` properties instead of redefining them. +- Updated the `force_update` service to queue coordinator refreshes via + `async_request_refresh` and added service coverage for entries lacking + runtime data. +- Cleared config entry `runtime_data` after unload to drop stale coordinator + references and keep teardown tidy. +- Enabled forecast day count and per-day sensor mode selection during initial + setup using dropdown selectors shared with the options flow to keep + validation consistent. +- Enhanced setup validation to surface HTTP 401/403 API messages safely via the + form error placeholders without exposing secrets. +- Updated the options flow to use selectors while normalizing numeric fields to + integers and keeping existing validation rules and defaults intact. +- Runtime HTTP 403 responses now surface detailed messages without triggering + reauthentication, keeping setup and update behavior aligned. +- Deduplicated HTTP error message extraction into a shared helper used by + config validation and the runtime client to keep diagnostics consistent. +- Updated the options flow regression test to expect the new + `invalid_forecast_days` error code for out-of-range values. +- Consolidated numeric options validation in the options flow through a shared + helper to reduce duplication for interval and forecast day checks. +- Centralized the pollen client retry count into a shared `MAX_RETRIES` + constant to simplify future tuning without touching request logic. 
+- Tightened error extraction typing to expect `aiohttp.ClientResponse` while + guarding the import so environments without aiohttp can still run tests. +- Reduced debug log volume by summarizing coordinator refreshes and sensor + creation details instead of logging full payloads. +- Reformatted the codebase with Black and Ruff to keep imports and styling + consistent with repository standards. +- Expanded translation coverage tests to include section titles and service + metadata keys, ensuring locales stay aligned with `en.json`. +- Coordinator Module Extraction: The PollenDataUpdateCoordinator class and its + associated helper functions have been moved from sensor.py to a new, + dedicated coordinator.py module. This significantly improves modularity and + separation of concerns within the component. +- Removed optional HTTP Referer (website restriction) support to simplify configuration, + as it is not suitable for server-side integrations (existing entries are + migrated to remove legacy `http_referer` values). +- Documented the 1–24 update interval range in the README options list. + +### Fixed +- Avoid pre-filling the API key field when the form is re-displayed after + validation errors. +- Added a fallback error message when unexpected client exceptions are raised to + avoid empty UpdateFailed errors in the UI. +- Fixed options flow to preserve the stored per-day sensor mode when no override + is set in entry options, preventing unintended resets to "none". +- Sanitized update interval defaults in setup and options forms to clamp + malformed stored values within supported bounds. +- Rejected update interval submissions above 24 hours to match selector limits. +- Sanitized setup and options defaults for forecast days and per-day sensor mode + selectors to keep UI defaults within supported choices. +- Normalized invalid stored per-day sensor mode values in the options flow to + avoid persisting unsupported selector choices. 
+- Simplified the per-day sensor mode fallback during options submission to reuse + the normalized current mode and prevent regressions. +- Migrated per-day sensor mode to entry options when stored in entry data to + prevent option resets after upgrades. +- Centralized per-day sensor mode normalization to avoid duplicate validation + logic across migration and options handling. +- Normalized invalid stored per-day sensor mode values already stored in entry + options during migration to keep options consistent. +- Versioned config entries to ensure the per-day sensor mode migration runs once + and is not repeated on every restart. +- Ensured unversioned entries run the per-day sensor mode migration and that + option presence is respected even when the stored value is None. +- Moved the optional API key section directly below the API key field in the + setup flow for improved visibility. +- Hardened HTTP client timeout handling and normalized non-string per-day sensor + mode values defensively. +- Added entry context to migration failure logs for easier debugging. +- Removed a mutable default from the API key options schema to avoid shared + state across config flow instances. +- Normalized whitespace-only per-day sensor mode values and preserved fallback + to entry data when options explicitly store None. +- Removed redundant timeout handling in the HTTP client error path. +- Fixed the force_update service to await coordinator refresh coroutines safely + without passing None into asyncio.gather. +- Hardened parsing of update interval and forecast days to tolerate malformed + stored values while keeping defaults intact. +- Hardened numeric parsing to handle non-finite values without crashing setup. +- Clamped update interval and forecast days in setup to supported ranges. +- Limited update interval to a maximum of 24 hours in setup and options. +- Clamped forecast day handling in sensor setup to the supported 1–5 range for + consistent cleanup decisions. 
+- Avoided treating empty indexInfo objects as valid forecast indices. +- Added force_update service name/description for better UI discoverability. +- Ensured migrations clean up legacy keys even when entries are already at the + target version. +- Always removed per-day sensor mode from entry data during migration to avoid + duplicated settings. +- Corrected Chinese setup description punctuation in zh-Hans and zh-Hant. + ## [1.8.6] - 2025-12-09 ### Changed - Parallelized the `force_update` service to refresh all entry coordinators concurrently @@ -250,23 +459,25 @@ ## [1.7.9] - 2025-09-06 ### Fixed -- **Date sensor**: Return a `datetime.date` object for `device_class: date` (was a string). Ensures - correct UI formatting and automation compatibility. +- **Date sensor**: Return a `datetime.date` object for `device_class: date` (was + a string). Ensures correct UI formatting and automation compatibility. ## [1.7.8] - 2025-09-05 ### Changed -- **Date sensor**: Set `device_class: date` so Home Assistant treats the value as a calendar date - (UI semantics/formatting). No functional impact. -- > Note: 1.7.8 set `device_class: date` but still returned a string. This was corrected in 1.7.9 to - return a proper `date` object. +- **Date sensor**: Set `device_class: date` so Home Assistant treats the value + as a calendar date (UI semantics/formatting). No functional impact. +- > Note: 1.7.8 set `device_class: date` but still returned a string. This was + corrected in 1.7.9 to return a proper `date` object. ## [1.7.7] - 2025-09-05 ### Changed - **Performance/cleanup**: Precompute static attributes for metadata sensors: - - Set `_attr_unique_id` and `_attr_icon` in `RegionSensor`, `DateSensor`, and `LastUpdatedSensor`. + - Set `_attr_unique_id` and `_attr_icon` in `RegionSensor`, `DateSensor`, and + `LastUpdatedSensor`. - Set `_attr_device_info` once in `_BaseMetaSensor`. - Also set `_attr_unique_id` in `PollenSensor` for consistency. 
- These changes avoid repeated property calls and align with modern HA entity patterns. No functional impact. + These changes avoid repeated property calls and align with modern HA entity + patterns. No functional impact. ## [1.7.6] - 2025-09-05 ### Changed @@ -320,8 +531,9 @@ ## [1.6.5] - 2025-08-26 ### Fixed -- Timeouts: catch built-in **`TimeoutError`** in Config Flow and Coordinator. - On Python 3.14 this also covers `asyncio.TimeoutError`, so listing both is unnecessary (and auto-removed by ruff/pyupgrade). +- Timeouts: catch built-in **`TimeoutError`** in Config Flow and Coordinator. + On Python 3.14 this also covers `asyncio.TimeoutError`, so listing both is + unnecessary (and auto-removed by ruff/pyupgrade). - Added missing `options.error` translations across all locales so **Options Flow** errors display localized. - **Security**: Config Flow now sanitizes exception messages (including connection/timeout errors) @@ -337,11 +549,13 @@ - Improved wording for `create_forecast_sensors` across all locales: - Field label now clarifies it’s the **range** for per-day TYPE sensors. - Step description explains each choice with plain language: - - **Only today (none)**, **Through tomorrow (D+1)**, **Through day after tomorrow (D+2)** (and local equivalents). + - **Only today (none)**, **Through tomorrow (D+1)**, + **Through day after tomorrow (D+2)** (and local equivalents). ### Changed - Minimal safe backoff in coordinator: single retry on transient failures (**TimeoutError**, `aiohttp.ClientError`, `5xx`). - For **429**, honor numeric `Retry-After` seconds (capped at **5s**) or fall back to ~**2s** plus small jitter. + For **429**, honor numeric `Retry-After` seconds (capped at **5s**) or fall + back to ~**2s** plus small jitter. 
## [1.6.4] - 2025-08-22 ### Fixed diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..c02090e7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# Contributing + +- Follow Home Assistant's current Python floor for integration code (Python 3.13). Tooling is pinned to Python 3.14, but + integration logic must stay compatible with 3.13 syntax and standard library features. +- Format code with Black (line length 88, target-version `py314`) and sort/lint imports with Ruff (`ruff check --fix --select I` followed + by `ruff check`). +- The translation source of truth is `custom_components/pollenlevels/translations/en.json`. Keep every other locale file in + sync with it. +- Do not add or rely on a `strings.json` file; translation updates should flow from `en.json` to the other language files. +- Preserve the existing coordinator-driven architecture and avoid introducing blocking I/O in the event loop. diff --git a/README.md b/README.md index 64f694bb..9f57dcc6 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,8 @@ Get sensors for **grass**, **tree**, **weed** pollen, plus individual plants lik - `forecast` list with `{offset, date, has_index, value, category, description, color_*}` - Convenience: `tomorrow_*` and `d2_*` - Derived: `trend` and `expected_peak` - - **Per-day sensors:** remain **TYPES-only** (optional `D+1` / `D+2`). + - **Per-day sensors:** remain **TYPES-only** with selector options `none`, `D+1`, + or `D+1+2` (creates both `(D+1)` and `(D+2)` sensors). **PLANTS** expose forecast **as attributes only** (no extra entities). - **Smart grouping** — Organizes sensors into: - **Pollen Types** (Grass / Tree / Weed) @@ -37,7 +38,8 @@ Get sensors for **grass**, **tree**, **weed** pollen, plus individual plants lik - **Configurable updates** — Change update interval, language, forecast days, and per-day sensors without reinstalling. 
- **Manual refresh** — Call `pollenlevels.force_update` to trigger an immediate update and reset the timer. - **Last Updated sensor** — Shows timestamp of last successful update. -- **Rich attributes** — Includes `inSeason`, index `description`, health `advice`, `color_hex`, `color_rgb`, `color_raw`, and plant details. +- **Rich attributes** — Includes `inSeason`, index `description`, health `advice`, + `color_hex`, `color_rgb`, and plant details. - **Resilient startup** — Retries setup automatically when the first API response lacks daily pollen info (`dailyInfo` types/plants), ensuring entities appear once data is ready. --- @@ -55,7 +57,7 @@ Get sensors for **grass**, **tree**, **weed** pollen, plus individual plants lik You can change: -- **Update interval (hours)** +- **Update interval (hours)** (1–24) - **API response language code** - **Forecast days** (`1–5`) for pollen TYPES - **Per-day TYPE sensors** via `create_forecast_sensors`: @@ -67,6 +69,10 @@ You can change: > - `D+1` requires `forecast_days ≥ 2` > - `D+1+2` requires `forecast_days ≥ 3` +The config and options flows use modern Home Assistant selectors and include +links to Google’s API key setup and security best practices so you can follow +the recommended restrictions. + > **After saving Options:** if per-day sensors are disabled or `forecast_days` becomes insufficient, the integration **removes** any stale D+1/D+2 entities from the **Entity Registry** automatically. No manual cleanup needed. Go to **Settings → Devices & Services → Pollen Levels → Configure**. @@ -84,13 +90,26 @@ You need a valid Google Cloud API key with access to the **Maps Pollen API**. 4. Go to **APIs & Services → Credentials → Create credentials → API key**. 5. **Restrict your key** (recommended): - **API restrictions** → **Restrict key** → select **Maps Pollen API** only. 
- - **Application restrictions** (optional but recommended): - - **HTTP referrers** (for frontend usages) or - - **IP addresses** (for server-side usage, e.g. your HA host). -6. **Copy** the key and paste it in the integration setup. + - **Application restrictions** (optional): + - Prefer **IP addresses** for server-side usage (your HA host). + - If your IP is dynamic, consider **no application restriction** and rely on + the API restriction above. +6. **Copy** the key and paste it in the integration setup. + +The setup form also links directly to the Google documentation for obtaining +an API key and best-practice restrictions. 👉 See the **[FAQ](FAQ.md)** for **quota tips**, rate-limit behavior, and best practices to avoid exhausting your free tier. +HTTP referrer (website) restrictions are intended for browser-based apps and +are not supported by this integration. + +### Troubleshooting 403 errors + +403 responses during setup or updates now include the API’s reason (when +available). They often indicate billing is disabled, the Pollen API is not +enabled, or your key restrictions do not match your Home Assistant host. + --- ## 🔧 Showing colors in the UI @@ -137,6 +156,14 @@ severity: ### 🧩 Custom cards (for real dynamic color binding) +**Pollen dashboard card (recommended): pollenprognos-card** + +If you want a dedicated pollen Lovelace card with forecast visualizations and a visual editor UI, +**pollenprognos-card** supports this integration since **v2.9.0**. + +- Repo: [pollenprognos-card](https://github.com/krissen/pollenprognos-card) +- Install: HACS → Frontend + If you need the icon/badge to follow the **exact** API color (`color_hex`): **Mushroom (mushroom-template-card)** diff --git a/custom_components/pollenlevels/__init__.py b/custom_components/pollenlevels/__init__.py index 6fd3b4ba..bd930a67 100644 --- a/custom_components/pollenlevels/__init__.py +++ b/custom_components/pollenlevels/__init__.py @@ -1,7 +1,7 @@ """Initialize Pollen Levels integration. 
Notes: -- Adds a top-level INFO log when the force_update service is invoked to aid debugging. +- Adds a top-level DEBUG log when the force_update service is invoked to aid debugging. - Registers an options update listener to reload the entry so interval/language changes take effect immediately without reinstalling. """ @@ -10,6 +10,7 @@ import asyncio import logging +import math from collections.abc import Awaitable from typing import Any @@ -18,44 +19,149 @@ from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, ServiceCall from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady +from homeassistant.helpers.aiohttp_client import async_get_clientsession -from .const import DOMAIN +from .client import GooglePollenApiClient +from .const import ( + CONF_API_KEY, + CONF_CREATE_FORECAST_SENSORS, + CONF_FORECAST_DAYS, + CONF_LANGUAGE_CODE, + CONF_LATITUDE, + CONF_LONGITUDE, + CONF_UPDATE_INTERVAL, + DEFAULT_ENTRY_TITLE, + DEFAULT_FORECAST_DAYS, + DEFAULT_UPDATE_INTERVAL, + DOMAIN, + MAX_FORECAST_DAYS, + MAX_UPDATE_INTERVAL_HOURS, + MIN_FORECAST_DAYS, + MIN_UPDATE_INTERVAL_HOURS, +) +from .coordinator import PollenDataUpdateCoordinator +from .runtime import PollenLevelsConfigEntry, PollenLevelsRuntimeData +from .sensor import ForecastSensorMode +from .util import normalize_sensor_mode, redact_api_key, safe_parse_int # Ensure YAML config is entry-only for this domain (no YAML schema). 
CONFIG_SCHEMA = cv.config_entry_only_config_schema(DOMAIN) _LOGGER = logging.getLogger(__name__) +TARGET_ENTRY_VERSION = 3 # ---- Service ------------------------------------------------------------- +async def async_migrate_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: + """Migrate config entry data to options when needed.""" + try: + target_version = TARGET_ENTRY_VERSION + current_version_raw = getattr(entry, "version", 1) + current_version = ( + current_version_raw if isinstance(current_version_raw, int) else 1 + ) + legacy_key = "http_referer" + existing_data = entry.data or {} + existing_options = entry.options or {} + cleanup_needed = ( + legacy_key in existing_data + or legacy_key in existing_options + or CONF_CREATE_FORECAST_SENSORS in existing_data + ) + if not cleanup_needed and CONF_CREATE_FORECAST_SENSORS in existing_options: + stored_mode = existing_options.get(CONF_CREATE_FORECAST_SENSORS) + stored_mode_raw = getattr(stored_mode, "value", stored_mode) + if stored_mode_raw is not None: + stored_mode_raw = str(stored_mode_raw) + cleanup_needed = ( + normalize_sensor_mode(stored_mode_raw, _LOGGER) != stored_mode_raw + ) + if current_version >= target_version and not cleanup_needed: + return True + + new_data = dict(existing_data) + new_options = dict(existing_options) + mode = new_options.get(CONF_CREATE_FORECAST_SENSORS) + if mode is None: + mode = new_data.get(CONF_CREATE_FORECAST_SENSORS) + new_data.pop(CONF_CREATE_FORECAST_SENSORS, None) + + mode_raw = getattr(mode, "value", mode) + if mode_raw is not None: + mode_raw = str(mode_raw) + normalized_mode = normalize_sensor_mode(mode_raw, _LOGGER) + if new_options.get(CONF_CREATE_FORECAST_SENSORS) != normalized_mode: + new_options[CONF_CREATE_FORECAST_SENSORS] = normalized_mode + else: + new_options.pop(CONF_CREATE_FORECAST_SENSORS, None) + + new_data.pop(legacy_key, None) + new_options.pop(legacy_key, None) + + new_version = max(current_version, target_version) + if new_data != existing_data 
or new_options != existing_options: + hass.config_entries.async_update_entry( + entry, data=new_data, options=new_options, version=new_version + ) + else: + hass.config_entries.async_update_entry(entry, version=new_version) + return True + except asyncio.CancelledError: + raise + except Exception: # noqa: BLE001 + _LOGGER.exception( + "Failed to migrate per-day sensor mode to entry options for entry %s " + "(version=%s)", + entry.entry_id, + getattr(entry, "version", None), + ) + return False + + async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool: """Register force_update service.""" _LOGGER.debug("PollenLevels async_setup called") async def handle_force_update_service(call: ServiceCall) -> None: """Refresh pollen data for all entries.""" - # Added: top-level log to confirm manual trigger for easier debugging. - _LOGGER.info("Executing force_update service for all Pollen Levels entries") + _LOGGER.debug("Executing force_update service for all Pollen Levels entries") entries = list(hass.config_entries.async_entries(DOMAIN)) tasks: list[Awaitable[None]] = [] task_entries: list[ConfigEntry] = [] for entry in entries: - coordinator = hass.data.get(DOMAIN, {}).get(entry.entry_id) + runtime = getattr(entry, "runtime_data", None) + coordinator = getattr(runtime, "coordinator", None) if coordinator: - _LOGGER.info("Trigger manual refresh for entry %s", entry.entry_id) - tasks.append(coordinator.async_refresh()) + tasks.append(coordinator.async_request_refresh()) task_entries.append(entry) + else: + _LOGGER.debug( + "Skipping force_update for entry %s (no coordinator)", + entry.entry_id, + ) + + if not tasks: + _LOGGER.debug("No coordinators available for force_update") + return - if tasks: - results = await asyncio.gather(*tasks, return_exceptions=True) - for entry, result in zip(task_entries, results, strict=False): - if isinstance(result, Exception): - _LOGGER.warning( - "Manual refresh failed for entry %s: %r", - entry.entry_id, - result, - ) + 
results = await asyncio.gather(*tasks, return_exceptions=True) + for entry, result in zip(task_entries, results, strict=False): + if isinstance(result, asyncio.CancelledError): + _LOGGER.debug( + "Manual refresh cancelled for entry %s", + entry.entry_id, + ) + continue + if isinstance(result, Exception): + api_key = (entry.data or {}).get(CONF_API_KEY) + safe_message = redact_api_key(result, api_key) + _LOGGER.warning( + "Manual refresh failed for entry %s (%s): %s", + entry.entry_id, + type(result).__name__, + safe_message or "no error details", + ) # Enforce empty payload for the service; reject unknown fields for clearer errors. hass.services.async_register( @@ -64,7 +170,9 @@ async def handle_force_update_service(call: ServiceCall) -> None: return True -async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: +async def async_setup_entry( + hass: HomeAssistant, entry: PollenLevelsConfigEntry +) -> bool: """Forward config entry to sensor platform and register options listener.""" _LOGGER.debug( "PollenLevels async_setup_entry for entry_id=%s title=%s", @@ -72,15 +180,113 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: entry.title, ) + options = entry.options or {} + + parsed_hours = safe_parse_int( + options.get( + CONF_UPDATE_INTERVAL, + entry.data.get(CONF_UPDATE_INTERVAL, DEFAULT_UPDATE_INTERVAL), + ) + ) + hours = parsed_hours if parsed_hours is not None else DEFAULT_UPDATE_INTERVAL + hours = max(MIN_UPDATE_INTERVAL_HOURS, min(MAX_UPDATE_INTERVAL_HOURS, hours)) + parsed_forecast_days = safe_parse_int( + options.get( + CONF_FORECAST_DAYS, + entry.data.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS), + ) + ) + forecast_days = ( + parsed_forecast_days + if parsed_forecast_days is not None + else DEFAULT_FORECAST_DAYS + ) + forecast_days = max(MIN_FORECAST_DAYS, min(MAX_FORECAST_DAYS, forecast_days)) + language = options.get(CONF_LANGUAGE_CODE, entry.data.get(CONF_LANGUAGE_CODE)) + raw_mode = options.get( + 
CONF_CREATE_FORECAST_SENSORS, + entry.data.get(CONF_CREATE_FORECAST_SENSORS, ForecastSensorMode.NONE), + ) + normalized_mode = normalize_sensor_mode(raw_mode, _LOGGER) try: - await hass.config_entries.async_forward_entry_setups(entry, ["sensor"]) + mode = ForecastSensorMode(normalized_mode) + except (ValueError, TypeError): + mode = ForecastSensorMode.NONE + create_d1 = ( + mode in (ForecastSensorMode.D1, ForecastSensorMode.D1_D2) and forecast_days >= 2 + ) + create_d2 = mode == ForecastSensorMode.D1_D2 and forecast_days >= 3 + + api_key = entry.data.get(CONF_API_KEY) + if not isinstance(api_key, str) or not api_key.strip(): + raise ConfigEntryAuthFailed("Invalid API key") + api_key = api_key.strip() + + raw_lat = entry.data.get(CONF_LATITUDE) + raw_lon = entry.data.get(CONF_LONGITUDE) + try: + lat = float(raw_lat) + lon = float(raw_lon) + except (TypeError, ValueError) as err: + _LOGGER.warning( + "Invalid config entry coordinates for entry %s", + entry.entry_id, + ) + raise ConfigEntryNotReady from err + + if ( + not math.isfinite(lat) + or not math.isfinite(lon) + or not (-90.0 <= lat <= 90.0) + or not (-180.0 <= lon <= 180.0) + ): + _LOGGER.warning( + "Out-of-range or non-finite coordinates for entry %s", + entry.entry_id, + ) + raise ConfigEntryNotReady + + raw_title = entry.title or "" + clean_title = raw_title.strip() or DEFAULT_ENTRY_TITLE + + session = async_get_clientsession(hass) + client = GooglePollenApiClient(session, api_key) + + coordinator = PollenDataUpdateCoordinator( + hass=hass, + api_key=api_key, + lat=lat, + lon=lon, + hours=hours, + language=language, + entry_id=entry.entry_id, + entry_title=clean_title, + forecast_days=forecast_days, + create_d1=create_d1, + create_d2=create_d2, + client=client, + ) + + try: + await coordinator.async_config_entry_first_refresh() except ConfigEntryAuthFailed: raise except ConfigEntryNotReady: raise except Exception as err: + _LOGGER.exception("Error during initial data refresh: %s", err) + raise 
ConfigEntryNotReady from err + + entry.runtime_data = PollenLevelsRuntimeData(coordinator=coordinator, client=client) + + try: + await hass.config_entries.async_forward_entry_setups(entry, ["sensor"]) + except (ConfigEntryAuthFailed, ConfigEntryNotReady): + entry.runtime_data = None + raise + except Exception as err: + entry.runtime_data = None _LOGGER.exception("Error forwarding entry setups: %s", err) - # Surfaced as ConfigEntryNotReady so HA can retry later. raise ConfigEntryNotReady from err # Ensure options updates (interval/language/forecast settings) trigger reload. @@ -96,8 +302,8 @@ async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: "PollenLevels async_unload_entry called for entry_id=%s", entry.entry_id ) unloaded = await hass.config_entries.async_unload_platforms(entry, ["sensor"]) - if unloaded and DOMAIN in hass.data and entry.entry_id in hass.data[DOMAIN]: - hass.data[DOMAIN].pop(entry.entry_id) + if unloaded: + entry.runtime_data = None return unloaded diff --git a/custom_components/pollenlevels/client.py b/custom_components/pollenlevels/client.py new file mode 100644 index 00000000..f03cb6f3 --- /dev/null +++ b/custom_components/pollenlevels/client.py @@ -0,0 +1,229 @@ +from __future__ import annotations + +import asyncio +import logging +import math +import random +from typing import Any + +from aiohttp import ClientError, ClientSession, ClientTimeout + +try: # pragma: no cover - fallback for environments with stubbed aiohttp + from aiohttp import ContentTypeError +except ImportError: # pragma: no cover - tests stub aiohttp without ContentTypeError + ContentTypeError = ValueError # type: ignore[misc,assignment] +from homeassistant.exceptions import ConfigEntryAuthFailed +from homeassistant.helpers.update_coordinator import UpdateFailed +from homeassistant.util import dt as dt_util + +from .const import MAX_RETRIES, POLLEN_API_TIMEOUT, is_invalid_api_key_message +from .util import extract_error_message, redact_api_key + 
+_LOGGER = logging.getLogger(__name__) + + +def _format_http_message(status: int, raw_message: str | None) -> str: + """Format an HTTP status and optional message consistently.""" + + if raw_message: + return f"HTTP {status}: {raw_message}" + return f"HTTP {status}" + + +class GooglePollenApiClient: + """Thin async client wrapper for the Google Pollen API.""" + + def __init__(self, session: ClientSession, api_key: str) -> None: + self._session = session + self._api_key = api_key + + def _parse_retry_after(self, retry_after_raw: str) -> float: + """Translate a Retry-After header into a delay in seconds.""" + + try: + parsed = float(retry_after_raw) + if math.isfinite(parsed) and parsed > 0: + return parsed + return 2.0 + except (TypeError, ValueError): + retry_at = dt_util.parse_http_date(retry_after_raw) + if retry_at is not None: + delay = (retry_at - dt_util.utcnow()).total_seconds() + if math.isfinite(delay) and delay > 0: + return delay + + return 2.0 + + async def _async_backoff( + self, + *, + attempt: int, + max_retries: int, + message: str, + base_args: tuple[Any, ...] 
= (), + ) -> None: + """Log a retry warning with jittered backoff and sleep.""" + + delay = 0.8 * (2**attempt) + random.uniform(0.0, 0.3) + _LOGGER.warning(message, *base_args, delay, attempt + 1, max_retries) + await asyncio.sleep(delay) + + async def async_fetch_pollen_data( + self, + *, + latitude: float, + longitude: float, + days: int, + language_code: str | None, + ) -> dict[str, Any]: + """Perform the HTTP call and return the decoded payload.""" + + url = "https://pollen.googleapis.com/v1/forecast:lookup" + params = { + "key": self._api_key, + "location.latitude": f"{latitude:.6f}", + "location.longitude": f"{longitude:.6f}", + "days": days, + } + if language_code: + params["languageCode"] = language_code + + _LOGGER.debug( + "Fetching forecast (days=%s, lang_set=%s)", days, bool(language_code) + ) + + max_retries = MAX_RETRIES + for attempt in range(0, max_retries + 1): + try: + async with self._session.get( + url, + params=params, + timeout=ClientTimeout(total=POLLEN_API_TIMEOUT), + ) as resp: + if resp.status == 401: + raw_message = redact_api_key( + await extract_error_message(resp, default=""), self._api_key + ) + message = _format_http_message(resp.status, raw_message or None) + raise ConfigEntryAuthFailed(message) + + if resp.status == 403: + raw_message = redact_api_key( + await extract_error_message(resp, default=""), self._api_key + ) + message = _format_http_message(resp.status, raw_message or None) + if is_invalid_api_key_message(raw_message): + raise ConfigEntryAuthFailed(message) + raise UpdateFailed(message) + + if resp.status == 429: + if attempt < max_retries: + retry_after_raw = resp.headers.get("Retry-After") + delay = 2.0 + if retry_after_raw: + delay = self._parse_retry_after(retry_after_raw) + delay = delay + random.uniform(0.0, 0.4) + delay = max(0.0, min(delay, 5.0)) + _LOGGER.warning( + "Pollen API 429 — retrying in %.2fs (attempt %d/%d)", + delay, + attempt + 1, + max_retries, + ) + await asyncio.sleep(delay) + continue + 
raw_message = redact_api_key( + await extract_error_message(resp, default=""), self._api_key + ) + message = _format_http_message(resp.status, raw_message or None) + raise UpdateFailed(message) + + if 500 <= resp.status <= 599: + if attempt < max_retries: + await self._async_backoff( + attempt=attempt, + max_retries=max_retries, + message=( + "Pollen API HTTP %s — retrying in %.2fs " + "(attempt %d/%d)" + ), + base_args=(resp.status,), + ) + continue + raw_message = redact_api_key( + await extract_error_message(resp, default=""), self._api_key + ) + message = _format_http_message(resp.status, raw_message or None) + raise UpdateFailed(message) + + if 400 <= resp.status < 500 and resp.status not in (403, 429): + raw_message = redact_api_key( + await extract_error_message(resp, default=""), self._api_key + ) + message = _format_http_message(resp.status, raw_message or None) + raise UpdateFailed(message) + + if resp.status != 200: + raw_message = redact_api_key( + await extract_error_message(resp, default=""), self._api_key + ) + message = _format_http_message(resp.status, raw_message or None) + raise UpdateFailed(message) + + try: + try: + payload = await resp.json(content_type=None) + except TypeError: + payload = await resp.json() + except (ContentTypeError, TypeError, ValueError) as err: + raise UpdateFailed( + "Unexpected API response: invalid JSON" + ) from err + + if not isinstance(payload, dict): + raise UpdateFailed( + "Unexpected API response: expected JSON object" + ) + + return payload + + except ConfigEntryAuthFailed: + raise + except TimeoutError as err: + if attempt < max_retries: + await self._async_backoff( + attempt=attempt, + max_retries=max_retries, + message=( + "Pollen API timeout — retrying in %.2fs " "(attempt %d/%d)" + ), + ) + continue + msg = ( + redact_api_key(err, self._api_key) + or "Google Pollen API call timed out" + ) + raise UpdateFailed(f"Timeout: {msg}") from err + except ClientError as err: + if attempt < max_retries: + await 
self._async_backoff( + attempt=attempt, + max_retries=max_retries, + message=( + "Network error to Pollen API — retrying in %.2fs " + "(attempt %d/%d)" + ), + ) + continue + msg = redact_api_key(err, self._api_key) or ( + "Network error while calling the Google Pollen API" + ) + raise UpdateFailed(msg) from err + except UpdateFailed: + raise + except Exception as err: # noqa: BLE001 + msg = redact_api_key(err, self._api_key) + if not msg: + msg = "Unexpected error while calling the Google Pollen API" + _LOGGER.error("Pollen API error: %s", msg) + raise UpdateFailed(msg) from err diff --git a/custom_components/pollenlevels/config_flow.py b/custom_components/pollenlevels/config_flow.py index b26e416c..d0723b49 100644 --- a/custom_components/pollenlevels/config_flow.py +++ b/custom_components/pollenlevels/config_flow.py @@ -6,6 +6,9 @@ - Redacts API keys in debug logs. - Timeout handling: on Python 3.14, built-in `TimeoutError` also covers `asyncio.TimeoutError`, so catching `TimeoutError` is sufficient and preferred. + +IMPORTANT: +- Keep schema construction centralized so defaults are applied consistently. 
""" from __future__ import annotations @@ -21,7 +24,19 @@ from homeassistant import config_entries from homeassistant.const import CONF_LATITUDE, CONF_LOCATION, CONF_LONGITUDE, CONF_NAME from homeassistant.helpers.aiohttp_client import async_get_clientsession -from homeassistant.helpers.selector import LocationSelector, LocationSelectorConfig +from homeassistant.helpers.selector import ( + LocationSelector, + LocationSelectorConfig, + NumberSelector, + NumberSelectorConfig, + NumberSelectorMode, + SelectSelector, + SelectSelectorConfig, + SelectSelectorMode, + TextSelector, + TextSelectorConfig, + TextSelectorType, +) from .const import ( CONF_API_KEY, @@ -35,12 +50,27 @@ DOMAIN, FORECAST_SENSORS_CHOICES, MAX_FORECAST_DAYS, + MAX_UPDATE_INTERVAL_HOURS, MIN_FORECAST_DAYS, + MIN_UPDATE_INTERVAL_HOURS, + POLLEN_API_KEY_URL, + POLLEN_API_TIMEOUT, + RESTRICTING_API_KEYS_URL, + is_invalid_api_key_message, +) +from .util import ( + extract_error_message, + normalize_sensor_mode, + redact_api_key, + safe_parse_int, ) -from .util import redact_api_key _LOGGER = logging.getLogger(__name__) +FORECAST_DAYS_OPTIONS = [ + str(i) for i in range(MIN_FORECAST_DAYS, MAX_FORECAST_DAYS + 1) +] + # BCP-47-ish regex (common patterns, not full grammar). 
LANGUAGE_CODE_REGEX = re.compile( r"^[A-Za-z]{2,3}" @@ -51,17 +81,6 @@ ) -STEP_USER_DATA_SCHEMA = vol.Schema( - { - vol.Required(CONF_API_KEY): str, - vol.Optional(CONF_UPDATE_INTERVAL, default=DEFAULT_UPDATE_INTERVAL): vol.All( - vol.Coerce(int), vol.Range(min=1) - ), - vol.Optional(CONF_LANGUAGE_CODE): str, - } -) - - def is_valid_language_code(value: str) -> str: """Validate language code format; return normalized (trimmed) value.""" if not isinstance(value, str): @@ -77,7 +96,6 @@ def is_valid_language_code(value: str) -> str: def _language_error_to_form_key(error: vol.Invalid) -> str: """Convert voluptuous validation errors into form error keys.""" - message = getattr(error, "error_message", "") if message == "empty": return "empty" @@ -88,7 +106,6 @@ def _language_error_to_form_key(error: vol.Invalid) -> str: def _safe_coord(value: float | None, *, lat: bool) -> float | None: """Return a validated latitude/longitude or None if unset/invalid.""" - try: if lat: return cv.latitude(value) @@ -97,37 +114,91 @@ def _safe_coord(value: float | None, *, lat: bool) -> float | None: return None -def _get_location_schema(hass: Any) -> vol.Schema: - """Return schema for name + location with defaults from HA config.""" +def _build_step_user_schema(hass: Any, user_input: dict[str, Any] | None) -> vol.Schema: + """Build the full step user schema without flattening nested sections.""" + user_input = user_input or {} - default_name = getattr(hass.config, "location_name", "") or DEFAULT_ENTRY_TITLE - default_lat = _safe_coord(getattr(hass.config, "latitude", None), lat=True) - default_lon = _safe_coord(getattr(hass.config, "longitude", None), lat=False) + default_name = str( + user_input.get(CONF_NAME) + or getattr(hass.config, "location_name", "") + or DEFAULT_ENTRY_TITLE + ) - if default_lat is not None and default_lon is not None: - location_field = vol.Required( - CONF_LOCATION, - default={ - CONF_LATITUDE: default_lat, - CONF_LONGITUDE: default_lon, - }, - ) + 
location_default = None + if isinstance(user_input.get(CONF_LOCATION), dict): + location_default = user_input[CONF_LOCATION] + else: + lat = _safe_coord(getattr(hass.config, "latitude", None), lat=True) + lon = _safe_coord(getattr(hass.config, "longitude", None), lat=False) + if lat is not None and lon is not None: + location_default = {CONF_LATITUDE: lat, CONF_LONGITUDE: lon} + + if location_default is not None: + location_field = vol.Required(CONF_LOCATION, default=location_default) else: location_field = vol.Required(CONF_LOCATION) - return vol.Schema( + update_interval_raw = user_input.get(CONF_UPDATE_INTERVAL, DEFAULT_UPDATE_INTERVAL) + interval_default = _sanitize_update_interval_for_default(update_interval_raw) + forecast_days_default = _sanitize_forecast_days_for_default( + user_input.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS) + ) + sensor_mode_default = _sanitize_forecast_mode_for_default( + user_input.get(CONF_CREATE_FORECAST_SENSORS, FORECAST_SENSORS_CHOICES[0]) + ) + + schema = vol.Schema( { + vol.Required(CONF_API_KEY): TextSelector( + TextSelectorConfig(type=TextSelectorType.PASSWORD) + ), vol.Required(CONF_NAME, default=default_name): str, location_field: LocationSelector(LocationSelectorConfig(radius=False)), + vol.Optional( + CONF_UPDATE_INTERVAL, + default=interval_default, + ): NumberSelector( + NumberSelectorConfig( + min=MIN_UPDATE_INTERVAL_HOURS, + max=MAX_UPDATE_INTERVAL_HOURS, + step=1, + mode=NumberSelectorMode.BOX, + unit_of_measurement="h", + ) + ), + vol.Optional( + CONF_LANGUAGE_CODE, + default=user_input.get( + CONF_LANGUAGE_CODE, getattr(hass.config, "language", "") + ), + ): TextSelector(TextSelectorConfig(type=TextSelectorType.TEXT)), + vol.Optional( + CONF_FORECAST_DAYS, + default=forecast_days_default, + ): SelectSelector( + SelectSelectorConfig( + mode=SelectSelectorMode.DROPDOWN, + options=FORECAST_DAYS_OPTIONS, + ) + ), + vol.Optional( + CONF_CREATE_FORECAST_SENSORS, + default=sensor_mode_default, + ): SelectSelector( + 
SelectSelectorConfig( + mode=SelectSelectorMode.DROPDOWN, + options=FORECAST_SENSORS_CHOICES, + ) + ), } ) + return schema def _validate_location_dict( location: dict[str, Any] | None, ) -> tuple[float, float] | None: """Validate location dict and return (lat, lon) or None on error.""" - if not isinstance(location, dict): return None @@ -146,10 +217,70 @@ def _validate_location_dict( return lat, lon +def _parse_int_option( + value: Any, + default: int, + *, + min_value: int | None = None, + max_value: int | None = None, + error_key: str | None = None, +) -> tuple[int, str | None]: + """Parse a numeric option to int and enforce bounds.""" + parsed = safe_parse_int(value if value is not None else default) + if parsed is None: + return default, error_key + + if min_value is not None and parsed < min_value: + return parsed, error_key + + if max_value is not None and parsed > max_value: + return parsed, error_key + + return parsed, None + + +def _parse_update_interval(value: Any, default: int) -> tuple[int, str | None]: + """Parse and validate the update interval in hours.""" + return _parse_int_option( + value, + default=default, + min_value=MIN_UPDATE_INTERVAL_HOURS, + max_value=MAX_UPDATE_INTERVAL_HOURS, + error_key="invalid_update_interval", + ) + + +def _sanitize_update_interval_for_default(raw_value: Any) -> int: + """Parse and clamp an update interval value to be used as a UI default.""" + parsed, _ = _parse_update_interval(raw_value, DEFAULT_UPDATE_INTERVAL) + return max(MIN_UPDATE_INTERVAL_HOURS, min(MAX_UPDATE_INTERVAL_HOURS, parsed)) + + +def _sanitize_forecast_days_for_default(raw_value: Any) -> str: + """Parse and clamp forecast days to be used as a UI default.""" + parsed, _ = _parse_int_option( + raw_value, + DEFAULT_FORECAST_DAYS, + min_value=MIN_FORECAST_DAYS, + max_value=MAX_FORECAST_DAYS, + error_key="invalid_forecast_days", + ) + parsed = max(MIN_FORECAST_DAYS, min(MAX_FORECAST_DAYS, parsed)) + return str(parsed) + + +def 
_sanitize_forecast_mode_for_default(raw_value: Any) -> str: + """Normalize forecast sensor mode to be used as a UI default.""" + mode = normalize_sensor_mode(raw_value, _LOGGER) + if mode in FORECAST_SENSORS_CHOICES: + return mode + return FORECAST_SENSORS_CHOICES[0] + + class PollenLevelsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Config flow for Pollen Levels.""" - VERSION = 1 + VERSION = 3 def __init__(self) -> None: """Initialize the config flow state.""" @@ -168,13 +299,55 @@ async def _async_validate_input( description_placeholders: dict[str, Any] | None = None, ) -> tuple[dict[str, str], dict[str, Any] | None]: """Validate user or reauth input and return normalized data.""" - - placeholders = description_placeholders + placeholders = ( + description_placeholders if description_placeholders is not None else {} + ) errors: dict[str, str] = {} normalized: dict[str, Any] = dict(user_input) normalized.pop(CONF_NAME, None) normalized.pop(CONF_LOCATION, None) + api_key = str(user_input.get(CONF_API_KEY, "")) if user_input else "" + api_key = api_key.strip() + + if not api_key: + errors[CONF_API_KEY] = "empty" + return errors, None + + interval_value, interval_error = _parse_update_interval( + normalized.get(CONF_UPDATE_INTERVAL), + default=DEFAULT_UPDATE_INTERVAL, + ) + normalized[CONF_UPDATE_INTERVAL] = interval_value + if interval_error: + errors[CONF_UPDATE_INTERVAL] = interval_error + placeholders.pop("error_message", None) + return errors, None + + forecast_days, days_error = _parse_int_option( + normalized.get(CONF_FORECAST_DAYS), + DEFAULT_FORECAST_DAYS, + min_value=MIN_FORECAST_DAYS, + max_value=MAX_FORECAST_DAYS, + error_key="invalid_forecast_days", + ) + normalized[CONF_FORECAST_DAYS] = forecast_days + if days_error: + errors[CONF_FORECAST_DAYS] = days_error + placeholders.pop("error_message", None) + return errors, None + + mode = normalized.get(CONF_CREATE_FORECAST_SENSORS, FORECAST_SENSORS_CHOICES[0]) + if mode not in 
FORECAST_SENSORS_CHOICES: + mode = FORECAST_SENSORS_CHOICES[0] + normalized[CONF_CREATE_FORECAST_SENSORS] = mode + needed = {"D+1": 2, "D+1+2": 3}.get(mode, 1) + if forecast_days < needed: + errors[CONF_CREATE_FORECAST_SENSORS] = "invalid_option_combo" + placeholders.pop("error_message", None) + return errors, None + normalized[CONF_CREATE_FORECAST_SENSORS] = mode + latlon = None if CONF_LOCATION in user_input: latlon = _validate_location_dict(user_input.get(CONF_LOCATION)) @@ -183,6 +356,7 @@ async def _async_validate_input( "Invalid coordinates provided (values redacted): parsing failed" ) errors[CONF_LOCATION] = "invalid_coordinates" + placeholders.pop("error_message", None) return errors, None else: try: @@ -193,8 +367,8 @@ async def _async_validate_input( _LOGGER.debug( "Invalid coordinates provided (values redacted): parsing failed" ) - # Legacy lat/lon path (e.g., reauth) has no CONF_LOCATION field on the form errors["base"] = "invalid_coordinates" + placeholders.pop("error_message", None) return errors, None lat, lon = latlon @@ -202,6 +376,8 @@ async def _async_validate_input( normalized[CONF_LONGITUDE] = lon if check_unique_id: + # Keep unique_id formatting aligned with legacy entries for + # duplicate detection compatibility across upgrades. 
uid = f"{lat:.4f}_{lon:.4f}" try: await self.async_set_unique_id(uid, raise_on_progress=False) @@ -209,12 +385,13 @@ async def _async_validate_input( except Exception as err: # defensive _LOGGER.exception( "Unique ID setup failed for coordinates (values redacted): %s", - redact_api_key(err, user_input.get(CONF_API_KEY)), + redact_api_key(err, api_key), ) raise + normalized[CONF_API_KEY] = api_key + try: - # Allow blank language; if present, validate & normalize raw_lang = user_input.get(CONF_LANGUAGE_CODE, "") lang = raw_lang.strip() if isinstance(raw_lang, str) else "" if lang: @@ -222,7 +399,7 @@ async def _async_validate_input( session = async_get_clientsession(self.hass) params = { - "key": user_input[CONF_API_KEY], + "key": api_key, "location.latitude": f"{lat:.6f}", "location.longitude": f"{lon:.6f}", "days": 1, @@ -232,28 +409,29 @@ async def _async_validate_input( url = "https://pollen.googleapis.com/v1/forecast:lookup" - # SECURITY: Avoid logging URL+params (contains coordinates/key) _LOGGER.debug("Validating Pollen API (days=%s, lang_set=%s)", 1, bool(lang)) - # Add explicit timeout to prevent UI hangs on provider issues async with session.get( - url, params=params, timeout=aiohttp.ClientTimeout(total=10) + url, + params=params, + timeout=aiohttp.ClientTimeout(total=POLLEN_API_TIMEOUT), ) as resp: status = resp.status - if status == 403: - _LOGGER.debug("Validation HTTP 403 (body omitted)") - errors["base"] = "invalid_auth" - elif status == 429: - _LOGGER.debug("Validation HTTP 429 (body omitted)") - errors["base"] = "quota_exceeded" - elif status != 200: + if status != 200: _LOGGER.debug("Validation HTTP %s (body omitted)", status) - errors["base"] = "cannot_connect" - if placeholders is not None: - # Keep user-facing message generic; HTTP status is logged above - placeholders["error_message"] = ( - "Unable to validate the API key with the pollen service." 
- ) + raw_msg = await extract_error_message(resp, f"HTTP {status}") + placeholders["error_message"] = redact_api_key(raw_msg, api_key) + if status == 401: + errors["base"] = "invalid_auth" + elif status == 403: + if is_invalid_api_key_message(raw_msg): + errors["base"] = "invalid_auth" + else: + errors["base"] = "cannot_connect" + elif status == 429: + errors["base"] = "quota_exceeded" + else: + errors["base"] = "cannot_connect" else: raw = await resp.read() try: @@ -263,19 +441,28 @@ async def _async_validate_input( _LOGGER.debug( "Validation HTTP %s — %s", status, - redact_api_key(body_str, user_input.get(CONF_API_KEY)), + redact_api_key(body_str, api_key), ) try: data = json.loads(body_str) if body_str else {} except Exception: data = {} - if not data.get("dailyInfo"): - _LOGGER.warning("Validation: 'dailyInfo' missing") + + daily_info = ( + data.get("dailyInfo") if isinstance(data, dict) else None + ) + daily_is_valid = isinstance(daily_info, list) and bool(daily_info) + if daily_is_valid: + daily_is_valid = all( + isinstance(item, dict) for item in daily_info + ) + + if not daily_is_valid: + _LOGGER.warning("Validation: 'dailyInfo' missing or invalid") errors["base"] = "cannot_connect" - if placeholders is not None: - placeholders["error_message"] = ( - "API response missing expected pollen forecast information." - ) + placeholders["error_message"] = ( + "API response missing expected pollen forecast information." + ) if errors: return errors, None @@ -290,48 +477,52 @@ async def _async_validate_input( ve, ) errors[CONF_LANGUAGE_CODE] = _language_error_to_form_key(ve) + placeholders.pop("error_message", None) except TimeoutError as err: - # Catch built-in TimeoutError; on Python 3.14 this also covers asyncio.TimeoutError. 
_LOGGER.warning( - "Validation timeout (10s): %s", - redact_api_key(err, user_input.get(CONF_API_KEY)), + "Validation timeout (%ss): %s", + POLLEN_API_TIMEOUT, + redact_api_key(err, api_key), ) errors["base"] = "cannot_connect" - if placeholders is not None: - redacted = redact_api_key(err, user_input.get(CONF_API_KEY)) - placeholders["error_message"] = ( - redacted or "Validation request timed out (10 seconds)." - ) + redacted = redact_api_key(err, api_key) + placeholders["error_message"] = ( + redacted + or f"Validation request timed out ({POLLEN_API_TIMEOUT} seconds)." + ) except aiohttp.ClientError as err: _LOGGER.error( "Connection error: %s", - redact_api_key(err, user_input.get(CONF_API_KEY)), + redact_api_key(err, api_key), ) errors["base"] = "cannot_connect" - if placeholders is not None: - redacted = redact_api_key(err, user_input.get(CONF_API_KEY)) - placeholders["error_message"] = ( - redacted or "Network error while connecting to the pollen service." - ) + redacted = redact_api_key(err, api_key) + placeholders["error_message"] = ( + redacted or "Network error while connecting to the pollen service." 
+ ) except Exception as err: # defensive _LOGGER.exception( "Unexpected error in Pollen Levels config flow while validating input: %s", - redact_api_key(err, user_input.get(CONF_API_KEY)), + redact_api_key(err, api_key), ) errors["base"] = "unknown" - if placeholders is not None: - placeholders.pop("error_message", None) + placeholders.pop("error_message", None) return errors, None async def async_step_user(self, user_input=None): """Handle initial step.""" errors: dict[str, str] = {} - description_placeholders: dict[str, Any] = {} + description_placeholders: dict[str, Any] = { + "api_key_url": POLLEN_API_KEY_URL, + "restricting_api_keys_url": RESTRICTING_API_KEYS_URL, + } if user_input: + sanitized_input: dict[str, Any] = dict(user_input) + errors, normalized = await self._async_validate_input( - user_input, + sanitized_input, check_unique_id=True, description_placeholders=description_placeholders, ) @@ -340,36 +531,15 @@ async def async_step_user(self, user_input=None): title = entry_name or DEFAULT_ENTRY_TITLE return self.async_create_entry(title=title, data=normalized) - base_schema = STEP_USER_DATA_SCHEMA.schema.copy() - base_schema.update(_get_location_schema(self.hass).schema) - - suggested_values = { - CONF_LANGUAGE_CODE: self.hass.config.language, - CONF_NAME: getattr(self.hass.config, "location_name", "") - or DEFAULT_ENTRY_TITLE, - } - - lat = _safe_coord(getattr(self.hass.config, "latitude", None), lat=True) - lon = _safe_coord(getattr(self.hass.config, "longitude", None), lat=False) - if lat is not None and lon is not None: - suggested_values[CONF_LOCATION] = { - CONF_LATITUDE: lat, - CONF_LONGITUDE: lon, - } - return self.async_show_form( step_id="user", - data_schema=self.add_suggested_values_to_schema( - vol.Schema(base_schema), - {**suggested_values, **(user_input or {})}, - ), + data_schema=_build_step_user_schema(self.hass, user_input), errors=errors, description_placeholders=description_placeholders, ) async def async_step_reauth(self, 
entry_data: dict[str, Any]): """Handle re-authentication when credentials become invalid.""" - entry = self.hass.config_entries.async_get_entry(self.context["entry_id"]) if entry is None: return self.async_abort(reason="reauth_failed") @@ -379,13 +549,14 @@ async def async_step_reauth(self, entry_data: dict[str, Any]): async def async_step_reauth_confirm(self, user_input: dict[str, Any] | None = None): """Prompt for a refreshed API key and validate it.""" - assert self._reauth_entry is not None errors: dict[str, str] = {} placeholders = { "latitude": f"{self._reauth_entry.data.get(CONF_LATITUDE)}", "longitude": f"{self._reauth_entry.data.get(CONF_LONGITUDE)}", + "api_key_url": POLLEN_API_KEY_URL, + "restricting_api_keys_url": RESTRICTING_API_KEYS_URL, } if user_input: @@ -406,12 +577,11 @@ async def async_step_reauth_confirm(self, user_input: dict[str, Any] | None = No { vol.Required( CONF_API_KEY, - default=self._reauth_entry.data.get(CONF_API_KEY, ""), - ): str + default="", + ): TextSelector(TextSelectorConfig(type=TextSelectorType.PASSWORD)) } ) - # Ensure the form posts back to this handler. 
return self.async_show_form( step_id="reauth_confirm", data_schema=schema, @@ -431,49 +601,118 @@ async def async_step_init(self, user_input=None): errors: dict[str, str] = {} placeholders = {"title": self.entry.title or DEFAULT_ENTRY_TITLE} + current_interval_raw = self.entry.options.get( + CONF_UPDATE_INTERVAL, + self.entry.data.get(CONF_UPDATE_INTERVAL, DEFAULT_UPDATE_INTERVAL), + ) + current_interval = _sanitize_update_interval_for_default(current_interval_raw) + current_lang = self.entry.options.get( + CONF_LANGUAGE_CODE, + self.entry.data.get(CONF_LANGUAGE_CODE, self.hass.config.language), + ) + current_days_raw = self.entry.options.get( + CONF_FORECAST_DAYS, + self.entry.data.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS), + ) + current_days_default = _sanitize_forecast_days_for_default(current_days_raw) + current_days = int(current_days_default) + current_mode = self.entry.options.get(CONF_CREATE_FORECAST_SENSORS) + if current_mode is None: + current_mode = self.entry.data.get(CONF_CREATE_FORECAST_SENSORS, "none") + current_mode = _sanitize_forecast_mode_for_default(current_mode) + + options_schema = vol.Schema( + { + vol.Optional( + CONF_UPDATE_INTERVAL, default=current_interval + ): NumberSelector( + NumberSelectorConfig( + min=MIN_UPDATE_INTERVAL_HOURS, + max=MAX_UPDATE_INTERVAL_HOURS, + step=1, + mode=NumberSelectorMode.BOX, + unit_of_measurement="h", + ) + ), + vol.Optional(CONF_LANGUAGE_CODE, default=current_lang): TextSelector( + TextSelectorConfig(type=TextSelectorType.TEXT) + ), + vol.Optional( + CONF_FORECAST_DAYS, default=current_days_default + ): SelectSelector( + SelectSelectorConfig( + mode=SelectSelectorMode.DROPDOWN, + options=FORECAST_DAYS_OPTIONS, + ) + ), + vol.Optional( + CONF_CREATE_FORECAST_SENSORS, default=current_mode + ): SelectSelector( + SelectSelectorConfig( + mode=SelectSelectorMode.DROPDOWN, + options=FORECAST_SENSORS_CHOICES, + ) + ), + } + ) + if user_input is not None: + normalized_input: dict[str, Any] = 
{**self.entry.options, **user_input} + interval_value, interval_error = _parse_update_interval( + normalized_input.get(CONF_UPDATE_INTERVAL, current_interval), + current_interval, + ) + normalized_input[CONF_UPDATE_INTERVAL] = interval_value + if interval_error: + errors[CONF_UPDATE_INTERVAL] = interval_error + + if errors.get(CONF_UPDATE_INTERVAL): + return self.async_show_form( + step_id="init", + data_schema=options_schema, + errors=errors, + description_placeholders=placeholders, + ) + + forecast_days, days_error = _parse_int_option( + normalized_input.get(CONF_FORECAST_DAYS, current_days), + current_days, + min_value=MIN_FORECAST_DAYS, + max_value=MAX_FORECAST_DAYS, + error_key="invalid_forecast_days", + ) + normalized_input[CONF_FORECAST_DAYS] = forecast_days + if days_error: + errors[CONF_FORECAST_DAYS] = days_error + try: - # Language: allow empty; if provided, validate & normalize. - raw_lang = user_input.get( + raw_lang = normalized_input.get( CONF_LANGUAGE_CODE, self.entry.options.get( - CONF_LANGUAGE_CODE, self.entry.data.get(CONF_LANGUAGE_CODE, "") + CONF_LANGUAGE_CODE, + self.entry.data.get(CONF_LANGUAGE_CODE, ""), ), ) lang = raw_lang.strip() if isinstance(raw_lang, str) else "" if lang: lang = is_valid_language_code(lang) - user_input[CONF_LANGUAGE_CODE] = lang # persist normalized - - # forecast_days within 1..5 - days = int( - user_input.get( - CONF_FORECAST_DAYS, - self.entry.options.get( - CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS - ), - ) - ) - if days < MIN_FORECAST_DAYS or days > MAX_FORECAST_DAYS: - errors[CONF_FORECAST_DAYS] = "invalid_option_combo" + normalized_input[CONF_LANGUAGE_CODE] = lang - # per-day sensors vs number of days - mode = user_input.get( + days = normalized_input[CONF_FORECAST_DAYS] + mode = normalized_input.get( CONF_CREATE_FORECAST_SENSORS, - self.entry.options.get(CONF_CREATE_FORECAST_SENSORS, "none"), + current_mode, ) - needed = 1 - if mode == "D+1": - needed = 2 - elif mode == "D+1+2": - needed = 3 + mode = 
normalize_sensor_mode(mode, _LOGGER) + normalized_input[CONF_CREATE_FORECAST_SENSORS] = mode + needed = {"D+1": 2, "D+1+2": 3}.get(mode, 1) if days < needed: errors[CONF_CREATE_FORECAST_SENSORS] = "invalid_option_combo" except vol.Invalid as ve: _LOGGER.warning( "Options language validation failed for '%s': %s", - user_input.get(CONF_LANGUAGE_CODE), + normalized_input.get(CONF_LANGUAGE_CODE), ve, ) errors[CONF_LANGUAGE_CODE] = _language_error_to_form_key(ve) @@ -485,39 +724,11 @@ async def async_step_init(self, user_input=None): errors["base"] = "unknown" if not errors: - return self.async_create_entry(title="", data=user_input) - - # Defaults: prefer options, fallback to data/HA config - current_interval = self.entry.options.get( - CONF_UPDATE_INTERVAL, - self.entry.data.get(CONF_UPDATE_INTERVAL, DEFAULT_UPDATE_INTERVAL), - ) - current_lang = self.entry.options.get( - CONF_LANGUAGE_CODE, - self.entry.data.get(CONF_LANGUAGE_CODE, self.hass.config.language), - ) - current_days = self.entry.options.get( - CONF_FORECAST_DAYS, - self.entry.data.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS), - ) - current_mode = self.entry.options.get(CONF_CREATE_FORECAST_SENSORS, "none") + return self.async_create_entry(title="", data=normalized_input) return self.async_show_form( step_id="init", - data_schema=vol.Schema( - { - vol.Optional( - CONF_UPDATE_INTERVAL, default=current_interval - ): vol.All(vol.Coerce(int), vol.Range(min=1)), - vol.Optional(CONF_LANGUAGE_CODE, default=current_lang): str, - vol.Optional(CONF_FORECAST_DAYS, default=current_days): vol.In( - list(range(MIN_FORECAST_DAYS, MAX_FORECAST_DAYS + 1)) - ), - vol.Optional( - CONF_CREATE_FORECAST_SENSORS, default=current_mode - ): vol.In(FORECAST_SENSORS_CHOICES), - } - ), + data_schema=options_schema, errors=errors, description_placeholders=placeholders, ) diff --git a/custom_components/pollenlevels/const.py b/custom_components/pollenlevels/const.py index 5ab18177..d487b672 100644 --- 
a/custom_components/pollenlevels/const.py +++ b/custom_components/pollenlevels/const.py @@ -1,3 +1,5 @@ +from __future__ import annotations + # Define constants for Pollen Levels integration DOMAIN = "pollenlevels" @@ -16,10 +18,38 @@ # Defaults DEFAULT_UPDATE_INTERVAL = 6 +MIN_UPDATE_INTERVAL_HOURS = 1 +MAX_UPDATE_INTERVAL_HOURS = 24 DEFAULT_FORECAST_DAYS = 2 # today + 1 (tomorrow) DEFAULT_ENTRY_TITLE = "Pollen Levels" MAX_FORECAST_DAYS = 5 MIN_FORECAST_DAYS = 1 +POLLEN_API_TIMEOUT = 10 +MAX_RETRIES = 1 +POLLEN_API_KEY_URL = ( + "https://developers.google.com/maps/documentation/pollen/get-api-key" +) +RESTRICTING_API_KEYS_URL = ( + "https://developers.google.com/maps/api-security-best-practices" +) # Allowed values for create_forecast_sensors selector -FORECAST_SENSORS_CHOICES = ["none", "D+1", "D+1+2"] +FORECAST_SENSORS_CHOICES: list[str] = ["none", "D+1", "D+1+2"] +ATTRIBUTION = "Data provided by Google Maps Pollen API" + + +def is_invalid_api_key_message(message: str | None) -> bool: + """Return True if *message* strongly indicates an invalid API key.""" + + if not message: + return False + + msg = message.casefold() + signals = ( + "api key not valid", + "invalid api key", + "api_key_invalid", + "apikeynotvalid", + "api key is not valid", + ) + return any(signal in msg for signal in signals) diff --git a/custom_components/pollenlevels/coordinator.py b/custom_components/pollenlevels/coordinator.py new file mode 100644 index 00000000..8e160bf3 --- /dev/null +++ b/custom_components/pollenlevels/coordinator.py @@ -0,0 +1,528 @@ +"""Pollen data update coordinator.""" + +from __future__ import annotations + +import asyncio +import logging +import math +from datetime import timedelta +from typing import TYPE_CHECKING, Any + +from homeassistant.exceptions import ConfigEntryAuthFailed +from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed +from homeassistant.util import dt as dt_util + +from .client import GooglePollenApiClient +from 
.const import ( + DEFAULT_ENTRY_TITLE, + DOMAIN, + MAX_FORECAST_DAYS, + MIN_FORECAST_DAYS, +) +from .util import redact_api_key, safe_parse_int + +if TYPE_CHECKING: + from homeassistant.core import HomeAssistant + + +_LOGGER = logging.getLogger(__name__) + + +def _normalize_channel(v: Any) -> int | None: + """Normalize a single channel to 0..255 (accept 0..1 or 0..255 inputs). + + Returns None if the value cannot be interpreted as a number. + """ + try: + f = float(v) + except (TypeError, ValueError, OverflowError): + return None + if not math.isfinite(f): + return None + if 0.0 <= f <= 1.0: + f *= 255.0 + return max(0, min(255, int(round(f)))) + + +def _rgb_from_api(color: dict[str, Any] | None) -> tuple[int, int, int] | None: + """Build an (R, G, B) tuple from API color dict. + + Rules: + - If color is not a dict, or an empty dict, return None + (meaning "no color provided by API"). + - If only some channels are present, missing ones are treated as 0 (black baseline) + but ONLY when at least one channel exists. This preserves partial colors like + {green, blue} without inventing a color for {}. 
+ """ + if not isinstance(color, dict) or not color: + return None + + r = _normalize_channel(color.get("red")) + g = _normalize_channel(color.get("green")) + b = _normalize_channel(color.get("blue")) + + # If all channels are None, treat as no color + if r is None and g is None and b is None: + return None + + # Replace missing channels with 0 (only when at least one exists) + return (r or 0, g or 0, b or 0) + + +def _rgb_to_hex_triplet(rgb: tuple[int, int, int] | None) -> str | None: + """Convert (R,G,B) 0..255 to #RRGGBB.""" + if rgb is None: + return None + r, g, b = rgb + return f"#{r:02X}{g:02X}{b:02X}" + + +def _normalize_plant_code(code: Any) -> str: + """Normalize plant code for cross-day map lookups.""" + if code is None: + return "" + return str(code).strip().upper() + + +class PollenDataUpdateCoordinator(DataUpdateCoordinator): + """Coordinate pollen data fetch with forecast support for TYPES and PLANTS.""" + + def __init__( + self, + hass: HomeAssistant, + api_key: str, + lat: float, + lon: float, + hours: int, + language: str | None, + entry_id: str, + forecast_days: int, + create_d1: bool, + create_d2: bool, + client: GooglePollenApiClient, + entry_title: str = DEFAULT_ENTRY_TITLE, + ): + """Initialize coordinator with configuration and interval.""" + super().__init__( + hass, + _LOGGER, + name=f"{DOMAIN}_{entry_id}", + update_interval=timedelta(hours=hours), + ) + self.api_key = api_key + self.lat = lat + self.lon = lon + + # Normalize language once at runtime: + # - Trim whitespace + # - Use None if empty after normalization (skip sending languageCode) + if isinstance(language, str): + language = language.strip() + self.language = language if language else None + + self.entry_id = entry_id + self.entry_title = entry_title or DEFAULT_ENTRY_TITLE + # Clamp defensively for legacy/manual entries to supported range. 
+ parsed_days = safe_parse_int(forecast_days) + if parsed_days is None: + parsed_days = MIN_FORECAST_DAYS + self.forecast_days = max(MIN_FORECAST_DAYS, min(MAX_FORECAST_DAYS, parsed_days)) + self.create_d1 = create_d1 + self.create_d2 = create_d2 + self._client = client + self._missing_dailyinfo_warned = False + + self.data: dict[str, dict] = {} + self.last_updated = None + + # ------------------------------ + # DRY helper for forecast attrs + # ------------------------------ + def _process_forecast_attributes( + self, base: dict[str, Any], forecast_list: list[dict[str, Any]] + ) -> dict[str, Any]: + """Attach common forecast attributes to a base sensor dict. + + This keeps TYPE and PLANT processing consistent without duplicating code. + + Adds: + - 'forecast' list + - Convenience: tomorrow_* / d2_* + - Derived: trend, expected_peak + + Does NOT touch per-day TYPE sensor creation (kept elsewhere). + """ + base["forecast"] = forecast_list + forecast_by_offset = {item.get("offset"): item for item in forecast_list} + + def _set_convenience(prefix: str, off: int) -> None: + f = forecast_by_offset.get(off) + base[f"{prefix}_has_index"] = f.get("has_index") if f else False + base[f"{prefix}_value"] = ( + f.get("value") if f and f.get("has_index") else None + ) + base[f"{prefix}_category"] = ( + f.get("category") if f and f.get("has_index") else None + ) + base[f"{prefix}_description"] = ( + f.get("description") if f and f.get("has_index") else None + ) + base[f"{prefix}_color_hex"] = ( + f.get("color_hex") if f and f.get("has_index") else None + ) + + _set_convenience("tomorrow", 1) + _set_convenience("d2", 2) + + # Trend (today vs tomorrow) + now_val = base.get("value") + tomorrow_val = base.get("tomorrow_value") + if isinstance(now_val, (int, float)) and isinstance(tomorrow_val, (int, float)): + if tomorrow_val > now_val: + base["trend"] = "up" + elif tomorrow_val < now_val: + base["trend"] = "down" + else: + base["trend"] = "flat" + else: + base["trend"] = None + + # 
Expected peak (excluding today) + peak = None + for f in forecast_list: + if f.get("has_index") and isinstance(f.get("value"), (int, float)): + if peak is None or f["value"] > peak["value"]: + peak = f + base["expected_peak"] = ( + { + "offset": peak["offset"], + "date": peak["date"], + "value": peak["value"], + "category": peak["category"], + } + if peak + else None + ) + return base + + async def _async_update_data(self): + """Fetch pollen data and extract sensors for current day and forecast.""" + try: + payload = await self._client.async_fetch_pollen_data( + latitude=self.lat, + longitude=self.lon, + days=self.forecast_days, + language_code=self.language, + ) + except ConfigEntryAuthFailed: + raise + except UpdateFailed: + raise + except asyncio.CancelledError: + raise + except Exception as err: # Keep previous behavior for unexpected errors + msg = redact_api_key(err, self.api_key) + _LOGGER.error("Pollen API error: %s", msg) + raise UpdateFailed(msg) from err + + new_data: dict[str, dict] = {} + + # region + if region := payload.get("regionCode"): + new_data["region"] = {"source": "meta", "value": region} + + daily_raw = payload.get("dailyInfo") + daily = daily_raw if isinstance(daily_raw, list) else None + # Keep day offsets stable: if any element is invalid, treat the payload as + # malformed instead of compacting/reindexing list positions. 
+ if daily is not None and any(not isinstance(item, dict) for item in daily): + daily = None + + if not daily: + if self.data: + if not self._missing_dailyinfo_warned: + _LOGGER.warning( + "API response missing or invalid dailyInfo; " + "keeping last successful data" + ) + self._missing_dailyinfo_warned = True + return self.data + raise UpdateFailed("API response missing or invalid dailyInfo") + self._missing_dailyinfo_warned = False + + # date (today) + first_day = daily[0] + date_obj = first_day.get("date", {}) or {} + if all(k in date_obj for k in ("year", "month", "day")): + new_data["date"] = { + "source": "meta", + "value": f"{date_obj['year']:04d}-{date_obj['month']:02d}-{date_obj['day']:02d}", + } + + type_codes: set[str] = set() + type_by_day_code: list[dict[str, dict[str, Any]]] = [] + plant_by_day_code: list[dict[str, dict[str, Any]]] = [] + for day in daily: + day_types: dict[str, dict[str, Any]] = {} + for item in day.get("pollenTypeInfo", []) or []: + if not isinstance(item, dict): + continue + code = (item.get("code") or "").upper() + if code: + day_types[code] = item + type_codes.add(code) + type_by_day_code.append(day_types) + + day_plants: dict[str, dict[str, Any]] = {} + for item in day.get("plantInfo", []) or []: + if not isinstance(item, dict): + continue + code = _normalize_plant_code(item.get("code")) + if code: + day_plants[code] = item + plant_by_day_code.append(day_plants) + + # Current-day TYPES + for tcode in sorted(type_codes): + titem = type_by_day_code[0].get(tcode) or {} + idx_raw = titem.get("indexInfo") + idx = idx_raw if isinstance(idx_raw, dict) else {} + rgb = _rgb_from_api(idx.get("color")) + key = f"type_{tcode.lower()}" + new_data[key] = { + "source": "type", + "value": idx.get("value"), + "category": idx.get("category"), + "displayName": titem.get("displayName", tcode), + "inSeason": titem.get("inSeason"), + "description": idx.get("indexDescription"), + "advice": titem.get("healthRecommendations"), + "color_hex": 
_rgb_to_hex_triplet(rgb), + "color_rgb": list(rgb) if rgb is not None else None, + } + + plant_keys: list[str] = [] + + # Current-day PLANTS + for _norm_code, pitem in sorted(plant_by_day_code[0].items()): + # NOTE: plant_by_day_code[0] is built using normalized, non-empty plant codes as keys, + # so `_norm_code` is guaranteed to be a stable non-empty identifier. + # We still derive `code` from the raw API field (stripped) for attributes, while + # using lowercased `code` for the sensor key to keep entity creation deterministic. + idx_raw = pitem.get("indexInfo") + idx = idx_raw if isinstance(idx_raw, dict) else {} + desc_raw = pitem.get("plantDescription") + desc = desc_raw if isinstance(desc_raw, dict) else {} + rgb = _rgb_from_api(idx.get("color")) + raw_code = pitem.get("code") + code = str(raw_code).strip() if raw_code is not None else "" + key = f"plants_{code.lower()}" + new_data[key] = { + "source": "plant", + "value": idx.get("value"), + "category": idx.get("category"), + "displayName": pitem.get("displayName", code), + "code": code, + "inSeason": pitem.get("inSeason"), + "type": desc.get("type"), + "family": desc.get("family"), + "season": desc.get("season"), + "cross_reaction": desc.get("crossReaction"), + "description": idx.get("indexDescription"), + "advice": pitem.get("healthRecommendations"), + "color_hex": _rgb_to_hex_triplet(rgb), + "color_rgb": list(rgb) if rgb is not None else None, + "picture": desc.get("picture"), + "picture_closeup": desc.get("pictureCloseup"), + } + plant_keys.append(key) + + # Forecast for TYPES + def _extract_day_info(day: dict) -> tuple[str | None, dict | None]: + d = day.get("date") or {} + if not all(k in d for k in ("year", "month", "day")): + return None, None + return f"{d['year']:04d}-{d['month']:02d}-{d['day']:02d}", d + + for tcode in sorted(type_codes): + type_key = f"type_{tcode.lower()}" + existing = new_data.get(type_key) + needs_skeleton = not existing or ( + existing.get("source") == "type" + and 
existing.get("value") is None + and existing.get("category") is None + and existing.get("description") is None + ) + base = existing or {} + if needs_skeleton: + base = { + "source": "type", + "displayName": tcode, + "inSeason": None, + "advice": None, + "value": None, + "category": None, + "description": None, + "color_hex": None, + "color_rgb": None, + } + + candidate = None + for day_idx, _day_data in enumerate(daily): + candidate = type_by_day_code[day_idx].get(tcode) + if isinstance(candidate, dict): + base["displayName"] = candidate.get("displayName", tcode) + base["inSeason"] = candidate.get("inSeason") + base["advice"] = candidate.get("healthRecommendations") + break + forecast_list: list[dict[str, Any]] = [] + for offset, day in enumerate(daily[1:], start=1): + if offset >= self.forecast_days: + break + date_str, _ = _extract_day_info(day) + item = type_by_day_code[offset].get(tcode) or {} + idx_raw = item.get("indexInfo") + idx = idx_raw if isinstance(idx_raw, dict) else None + has_index = isinstance(idx_raw, dict) and bool(idx_raw) + rgb = _rgb_from_api(idx.get("color")) if has_index else None + forecast_list.append( + { + "offset": offset, + "date": date_str, + "has_index": has_index, + "value": idx.get("value") if has_index else None, + "category": idx.get("category") if has_index else None, + "description": ( + idx.get("indexDescription") if has_index else None + ), + "color_hex": _rgb_to_hex_triplet(rgb) if has_index else None, + "color_rgb": ( + list(rgb) if (has_index and rgb is not None) else None + ), + } + ) + # Attach common forecast attributes (convenience, trend, expected_peak) + base = self._process_forecast_attributes(base, forecast_list) + new_data[type_key] = base + + # Optional per-day sensors (only if requested and day exists) + def _add_day_sensor( + off: int, + *, + _forecast_list=forecast_list, + _base=base, + _tcode=tcode, + _type_key=type_key, + ) -> None: + """Create a per-day type sensor for a given offset.""" + f = next((d for d 
in _forecast_list if d["offset"] == off), None) + if not f: + return + + # Use day-specific 'inSeason' and 'advice' from the forecast day. + try: + day_obj = daily[off] + except (IndexError, TypeError): + day_obj = None + day_item = type_by_day_code[off].get(_tcode) if day_obj else None + day_in_season = ( + day_item.get("inSeason") if isinstance(day_item, dict) else None + ) + day_advice = ( + day_item.get("healthRecommendations") + if isinstance(day_item, dict) + else None + ) + + dname = f"{_base.get('displayName', _tcode)} (D+{off})" + new_data[f"{_type_key}_d{off}"] = { + "source": "type", + "displayName": dname, + "value": f.get("value") if f.get("has_index") else None, + "category": f.get("category") if f.get("has_index") else None, + "description": f.get("description") if f.get("has_index") else None, + "inSeason": day_in_season, + "advice": day_advice, + "color_hex": f.get("color_hex"), + "color_rgb": f.get("color_rgb"), + "date": f.get("date"), + "has_index": f.get("has_index"), + } + + if self.create_d1: + _add_day_sensor(1) + if self.create_d2: + _add_day_sensor(2) + + # Forecast for PLANTS (attributes only; no per-day plant sensors) + for key in plant_keys: + base = new_data.get(key) or {} + pcode = _normalize_plant_code(base.get("code")) + if not pcode: + # Safety: skip if for some reason code is missing + continue + + forecast_list: list[dict[str, Any]] = [] + for offset, day in enumerate(daily[1:], start=1): + if offset >= self.forecast_days: + break + date_str, _ = _extract_day_info(day) + item = plant_by_day_code[offset].get(pcode) or {} + idx_raw = item.get("indexInfo") + idx = idx_raw if isinstance(idx_raw, dict) else None + has_index = isinstance(idx_raw, dict) and bool(idx_raw) + rgb = _rgb_from_api(idx.get("color")) if has_index else None + forecast_list.append( + { + "offset": offset, + "date": date_str, + "has_index": has_index, + "value": idx.get("value") if has_index else None, + "category": idx.get("category") if has_index else None, + 
"description": ( + idx.get("indexDescription") if has_index else None + ), + "color_hex": _rgb_to_hex_triplet(rgb) if has_index else None, + "color_rgb": ( + list(rgb) if (has_index and rgb is not None) else None + ), + } + ) + + # Attach common forecast attributes (convenience, trend, expected_peak) + base = self._process_forecast_attributes(base, forecast_list) + new_data[key] = base + + self.data = new_data + self.last_updated = dt_util.utcnow() + if _LOGGER.isEnabledFor(logging.DEBUG): + total = len(new_data) + types = 0 + plants = 0 + meta = 0 + per_day = 0 + for key, value in new_data.items(): + source = value.get("source") + if source == "type": + types += 1 + elif source == "plant": + plants += 1 + else: + meta += 1 + if key.endswith(("_d1", "_d2")): + per_day += 1 + updated = self.last_updated.isoformat() if self.last_updated else "unknown" + _LOGGER.debug( + "Update complete: entries=%d types=%d plants=%d meta=%d per_day=%d " + "forecast_days=%d d1=%s d2=%s updated=%s", + total, + types, + plants, + meta, + per_day, + self.forecast_days, + self.create_d1, + self.create_d2, + updated, + ) + return self.data diff --git a/custom_components/pollenlevels/diagnostics.py b/custom_components/pollenlevels/diagnostics.py index df7f3270..fccf0fa3 100644 --- a/custom_components/pollenlevels/diagnostics.py +++ b/custom_components/pollenlevels/diagnostics.py @@ -11,7 +11,8 @@ from __future__ import annotations -from typing import Any +import math +from typing import Any, cast from homeassistant.components.diagnostics import async_redact_data from homeassistant.config_entries import ConfigEntry @@ -26,19 +27,17 @@ CONF_LONGITUDE, CONF_UPDATE_INTERVAL, DEFAULT_FORECAST_DAYS, # use constant instead of magic number - DOMAIN, + MAX_FORECAST_DAYS, + MIN_FORECAST_DAYS, ) -from .util import redact_api_key +from .runtime import PollenLevelsRuntimeData +from .util import redact_api_key, safe_parse_int # Redact potentially sensitive values from diagnostics. 
-# NOTE: Also redact the "location.*" variants used in the request example to avoid -# leaking coordinates in exported diagnostics. TO_REDACT = { CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, - "location.latitude", - "location.longitude", } @@ -57,7 +56,8 @@ async def async_get_config_entry_diagnostics( NOTE: This function must not perform any network I/O. """ - coordinator = hass.data.get(DOMAIN, {}).get(entry.entry_id) + runtime = cast(PollenLevelsRuntimeData | None, getattr(entry, "runtime_data", None)) + coordinator = getattr(runtime, "coordinator", None) if runtime else None options: dict[str, Any] = dict(entry.options or {}) data: dict[str, Any] = dict(entry.data or {}) @@ -65,9 +65,12 @@ async def async_get_config_entry_diagnostics( # coordinates. This should not be redacted. def _rounded(value: Any) -> float | None: try: - return round(float(value), 1) - except (TypeError, ValueError): + f = float(value) + except (TypeError, ValueError, OverflowError): return None + if not math.isfinite(f): + return None + return round(f, 1) approx_location = { "label": "approximate_location (rounded)", @@ -77,26 +80,25 @@ def _rounded(value: Any) -> float | None: # --- Build a safe params example (no network I/O) ---------------------- # Use DEFAULT_FORECAST_DAYS from const.py to avoid config drift. 
- try: - days_effective = int( - options.get( - CONF_FORECAST_DAYS, - data.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS), - ) - ) - except Exception: + days_raw = options.get( + CONF_FORECAST_DAYS, + data.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS), + ) + parsed_days = safe_parse_int(days_raw) + if parsed_days is None: # Defensive fallback days_effective = DEFAULT_FORECAST_DAYS + else: + days_effective = parsed_days - # Clamp days to a sensible minimum (avoid 0 or negative in diagnostics) - if days_effective < 1: - days_effective = 1 + days_effective = max(MIN_FORECAST_DAYS, min(MAX_FORECAST_DAYS, days_effective)) params_example: dict[str, Any] = { # Explicitly mask the API key example "key": redact_api_key(data.get(CONF_API_KEY), data.get(CONF_API_KEY)) or "***", - "location.latitude": data.get(CONF_LATITUDE), - "location.longitude": data.get(CONF_LONGITUDE), + # Use rounded coordinates to avoid exposing precise location data. + "location.latitude": _rounded(data.get(CONF_LATITUDE)), + "location.longitude": _rounded(data.get(CONF_LONGITUDE)), "days": days_effective, } lang = options.get(CONF_LANGUAGE_CODE, data.get(CONF_LANGUAGE_CODE)) @@ -115,8 +117,12 @@ def _rounded(value: Any) -> float | None: "create_d1": getattr(coordinator, "create_d1", None), "create_d2": getattr(coordinator, "create_d2", None), "last_updated": _iso_or_none(getattr(coordinator, "last_updated", None)), - "data_keys": list((getattr(coordinator, "data", {}) or {}).keys()), + "data_keys_total": 0, + "data_keys": [], } + all_keys = list((getattr(coordinator, "data", {}) or {}).keys()) + coord_info["data_keys_total"] = len(all_keys) + coord_info["data_keys"] = all_keys[:50] # ---------- Forecast summaries (TYPES & PLANTS) ---------- data_map: dict[str, Any] = getattr(coordinator, "data", {}) or {} @@ -183,8 +189,6 @@ def _rounded(value: Any) -> float | None: CONF_CREATE_FORECAST_SENSORS: options.get(CONF_CREATE_FORECAST_SENSORS), }, "data": { - CONF_LATITUDE: data.get(CONF_LATITUDE), - 
CONF_LONGITUDE: data.get(CONF_LONGITUDE), CONF_LANGUAGE_CODE: data.get(CONF_LANGUAGE_CODE), }, }, @@ -194,5 +198,7 @@ def _rounded(value: Any) -> float | None: "request_params_example": params_example, } + # NOTE: Home Assistant's `async_redact_data` is a synchronous callback helper + # despite its `async_` prefix. Do not `await` it. # Redact secrets and return return async_redact_data(diag, TO_REDACT) diff --git a/custom_components/pollenlevels/manifest.json b/custom_components/pollenlevels/manifest.json index 9b5577b6..8a6d4eb7 100644 --- a/custom_components/pollenlevels/manifest.json +++ b/custom_components/pollenlevels/manifest.json @@ -7,5 +7,5 @@ "integration_type": "service", "iot_class": "cloud_polling", "issue_tracker": "https://github.com/eXPerience83/pollenlevels/issues", - "version": "1.8.6" + "version": "1.9.3" } diff --git a/custom_components/pollenlevels/runtime.py b/custom_components/pollenlevels/runtime.py new file mode 100644 index 00000000..fbca54c6 --- /dev/null +++ b/custom_components/pollenlevels/runtime.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from homeassistant.config_entries import ConfigEntry + +if TYPE_CHECKING: + from .client import GooglePollenApiClient + from .coordinator import PollenDataUpdateCoordinator + + +@dataclass(slots=True) +class PollenLevelsRuntimeData: + """Runtime container for a Pollen Levels config entry.""" + + coordinator: PollenDataUpdateCoordinator + client: GooglePollenApiClient + + +if TYPE_CHECKING: + PollenLevelsConfigEntry = ConfigEntry[PollenLevelsRuntimeData] +else: + PollenLevelsConfigEntry = ConfigEntry diff --git a/custom_components/pollenlevels/sensor.py b/custom_components/pollenlevels/sensor.py index 2e6d5a73..c2a99272 100644 --- a/custom_components/pollenlevels/sensor.py +++ b/custom_components/pollenlevels/sensor.py @@ -13,12 +13,10 @@ import asyncio import logging -import random from collections.abc import 
Awaitable -from datetime import date, timedelta # Added `date` for DATE device class native_value -from typing import TYPE_CHECKING, Any - -import aiohttp # For explicit ClientTimeout and ClientError +from datetime import date # Added `date` for DATE device class native_value +from enum import StrEnum +from typing import TYPE_CHECKING, Any, cast # Modern sensor base + enums from homeassistant.components.sensor import ( @@ -27,27 +25,21 @@ SensorStateClass, ) from homeassistant.const import ATTR_ATTRIBUTION -from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady +from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import entity_registry as er # entity-registry cleanup -from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, - DataUpdateCoordinator, - UpdateFailed, ) -from homeassistant.util import dt as dt_util if TYPE_CHECKING: - from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from .const import ( + ATTRIBUTION, CONF_API_KEY, - CONF_CREATE_FORECAST_SENSORS, CONF_FORECAST_DAYS, - CONF_LANGUAGE_CODE, CONF_LATITUDE, CONF_LONGITUDE, CONF_UPDATE_INTERVAL, @@ -55,11 +47,24 @@ DEFAULT_FORECAST_DAYS, DEFAULT_UPDATE_INTERVAL, DOMAIN, + MAX_FORECAST_DAYS, + MIN_FORECAST_DAYS, ) -from .util import redact_api_key +from .coordinator import PollenDataUpdateCoordinator +from .runtime import PollenLevelsConfigEntry, PollenLevelsRuntimeData +from .util import safe_parse_int _LOGGER = logging.getLogger(__name__) +__all__ = [ + "CONF_API_KEY", + "CONF_LATITUDE", + "CONF_LONGITUDE", + "CONF_UPDATE_INTERVAL", + "DEFAULT_FORECAST_DAYS", + "DEFAULT_UPDATE_INTERVAL", +] + # ---- Icons --------------------------------------------------------------- TYPE_ICONS = { @@ 
-72,58 +77,12 @@ DEFAULT_ICON = "mdi:flower-pollen" -def _normalize_channel(v: Any) -> int | None: - """Normalize a single channel to 0..255 (accept 0..1 or 0..255 inputs). - - Returns None if the value cannot be interpreted as a number. - """ - try: - f = float(v) - except (TypeError, ValueError): - return None - if 0.0 <= f <= 1.0: - f *= 255.0 - return max(0, min(255, int(round(f)))) - - -def _rgb_from_api(color: dict[str, Any] | None) -> tuple[int, int, int] | None: - """Build an (R, G, B) tuple from API color dict. - - Rules: - - If color is not a dict, or an empty dict, or has no numeric channels at all, - return None (meaning "no color provided by API"). - - If only some channels are present, missing ones are treated as 0 (black baseline) - but ONLY when at least one channel exists. This preserves partial colors like - {green, blue} without inventing a color for {}. - """ - if not isinstance(color, dict) or not color: - return None - - # Check if any of the channels is actually provided as numeric - has_any_channel = any( - isinstance(color.get(k), (int, float)) for k in ("red", "green", "blue") - ) - if not has_any_channel: - return None - - r = _normalize_channel(color.get("red")) - g = _normalize_channel(color.get("green")) - b = _normalize_channel(color.get("blue")) +class ForecastSensorMode(StrEnum): + """Options for forecast sensor creation.""" - # If all channels are None, treat as no color - if r is None and g is None and b is None: - return None - - # Replace missing channels with 0 (only when at least one exists) - return (r or 0, g or 0, b or 0) - - -def _rgb_to_hex_triplet(rgb: tuple[int, int, int] | None) -> str | None: - """Convert (R,G,B) 0..255 to #RRGGBB.""" - if rgb is None: - return None - r, g, b = rgb - return f"#{r:02X}{g:02X}{b:02X}" + NONE = "none" + D1 = "D+1" + D1_D2 = "D+1+2" async def _cleanup_per_day_entities( @@ -186,63 +145,35 @@ def _matches(uid: str, suffix: str) -> bool: async def async_setup_entry( hass: HomeAssistant, - 
config_entry: ConfigEntry, + config_entry: PollenLevelsConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Create coordinator and build sensors.""" - api_key = config_entry.data.get(CONF_API_KEY) - if not api_key: - _LOGGER.warning( - "Config entry %s is missing the API key; prompting reauthentication", - config_entry.entry_id, - ) - raise ConfigEntryAuthFailed("Missing API key in config entry") - # Config flow already enforces type and range on coordinates; missing values here - # would indicate a corrupted entry, so we only guard for presence. - lat = config_entry.data.get(CONF_LATITUDE) - lon = config_entry.data.get(CONF_LONGITUDE) - if lat is None or lon is None: + runtime = cast( + PollenLevelsRuntimeData | None, getattr(config_entry, "runtime_data", None) + ) + if runtime is None: + raise ConfigEntryNotReady("Runtime data not ready") + coordinator = runtime.coordinator + + opts = config_entry.options or {} + raw_days = opts.get(CONF_FORECAST_DAYS, coordinator.forecast_days) + parsed = safe_parse_int(raw_days) + if parsed is None: _LOGGER.warning( - "Config entry %s is missing coordinates; delaying setup until entry is complete", + "Invalid forecast_days '%s' for entry %s; defaulting to %s", + raw_days, config_entry.entry_id, + coordinator.forecast_days, ) - raise ConfigEntryNotReady("Missing coordinates in config entry") + forecast_days = parsed if parsed is not None else coordinator.forecast_days + forecast_days = max(MIN_FORECAST_DAYS, min(MAX_FORECAST_DAYS, forecast_days)) + create_d1 = coordinator.create_d1 + create_d2 = coordinator.create_d2 - opts = config_entry.options or {} - interval = opts.get( - CONF_UPDATE_INTERVAL, - config_entry.data.get(CONF_UPDATE_INTERVAL, DEFAULT_UPDATE_INTERVAL), - ) - lang = opts.get(CONF_LANGUAGE_CODE, config_entry.data.get(CONF_LANGUAGE_CODE)) - forecast_days = int(opts.get(CONF_FORECAST_DAYS, DEFAULT_FORECAST_DAYS)) - - # Map unified selector to internal flags - mode = 
opts.get(CONF_CREATE_FORECAST_SENSORS, "none") - create_d1 = mode == "D+1" or mode == "D+1+2" - create_d2 = mode == "D+1+2" - - # Decide if per-day entities are allowed *given current options* allow_d1 = create_d1 and forecast_days >= 2 allow_d2 = create_d2 and forecast_days >= 3 - raw_title = config_entry.title or "" - clean_title = raw_title.strip() or DEFAULT_ENTRY_TITLE - - coordinator = PollenDataUpdateCoordinator( - hass=hass, - api_key=api_key, - lat=lat, - lon=lon, - hours=interval, - language=lang, # normalized in the coordinator - entry_id=config_entry.entry_id, - entry_title=clean_title, - forecast_days=forecast_days, - create_d1=allow_d1, # pass effective flags - create_d2=allow_d2, - ) - await coordinator.async_config_entry_first_refresh() - data = coordinator.data or {} has_daily = ("date" in data) or any( key.startswith(("type_", "plants_")) for key in data @@ -257,12 +188,14 @@ async def async_setup_entry( hass, config_entry.entry_id, allow_d1=allow_d1, allow_d2=allow_d2 ) - hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = coordinator - sensors: list[CoordinatorEntity] = [] - for code in coordinator.data: + for code in data: if code in ("region", "date"): continue + if code.endswith("_d1") and not allow_d1: + continue + if code.endswith("_d2") and not allow_d2: + continue sensors.append(PollenSensor(coordinator, code)) sensors.extend( @@ -273,516 +206,18 @@ async def async_setup_entry( ] ) - _LOGGER.debug( - "Creating %d sensors: %s", len(sensors), [s.unique_id for s in sensors] - ) - async_add_entities(sensors, True) - - -class PollenDataUpdateCoordinator(DataUpdateCoordinator): - """Coordinate pollen data fetch with forecast support for TYPES and PLANTS.""" - - def __init__( - self, - hass: HomeAssistant, - api_key: str, - lat: float, - lon: float, - hours: int, - language: str | None, - entry_id: str, - forecast_days: int, - create_d1: bool, - create_d2: bool, - entry_title: str = DEFAULT_ENTRY_TITLE, - ): - """Initialize coordinator 
with configuration and interval.""" - super().__init__( - hass, - _LOGGER, - name=f"{DOMAIN}_{entry_id}", - update_interval=timedelta(hours=hours), - ) - self.api_key = api_key - self.lat = lat - self.lon = lon - - # Normalize language once at runtime: - # - Trim whitespace - # - Use None if empty after normalization (skip sending languageCode) - if isinstance(language, str): - language = language.strip() - self.language = language if language else None - - self.entry_id = entry_id - self.entry_title = entry_title or DEFAULT_ENTRY_TITLE - # Options flow restricts this range; no runtime clamping needed. - self.forecast_days = int(forecast_days) - self.create_d1 = create_d1 - self.create_d2 = create_d2 - - self.data: dict[str, dict] = {} - self.last_updated = None - self._session = async_get_clientsession(hass) - - # ------------------------------ - # DRY helper for forecast attrs - # ------------------------------ - def _process_forecast_attributes( - self, base: dict[str, Any], forecast_list: list[dict[str, Any]] - ) -> dict[str, Any]: - """Attach common forecast attributes to a base sensor dict. - - This keeps TYPE and PLANT processing consistent without duplicating code. - - Adds: - - 'forecast' list - - Convenience: tomorrow_* / d2_* - - Derived: trend, expected_peak - - Does NOT touch per-day TYPE sensor creation (kept elsewhere). 
- """ - base["forecast"] = forecast_list - - def _set_convenience(prefix: str, off: int) -> None: - f = next((d for d in forecast_list if d["offset"] == off), None) - base[f"{prefix}_has_index"] = f.get("has_index") if f else False - base[f"{prefix}_value"] = ( - f.get("value") if f and f.get("has_index") else None - ) - base[f"{prefix}_category"] = ( - f.get("category") if f and f.get("has_index") else None - ) - base[f"{prefix}_description"] = ( - f.get("description") if f and f.get("has_index") else None - ) - base[f"{prefix}_color_hex"] = ( - f.get("color_hex") if f and f.get("has_index") else None - ) - - _set_convenience("tomorrow", 1) - _set_convenience("d2", 2) - - # Trend (today vs tomorrow) - now_val = base.get("value") - tomorrow_val = base.get("tomorrow_value") - if isinstance(now_val, (int, float)) and isinstance(tomorrow_val, (int, float)): - if tomorrow_val > now_val: - base["trend"] = "up" - elif tomorrow_val < now_val: - base["trend"] = "down" - else: - base["trend"] = "flat" - else: - base["trend"] = None - - # Expected peak (excluding today) - peak = None - for f in forecast_list: - if f.get("has_index") and isinstance(f.get("value"), (int, float)): - if peak is None or f["value"] > peak["value"]: - peak = f - base["expected_peak"] = ( - { - "offset": peak["offset"], - "date": peak["date"], - "value": peak["value"], - "category": peak["category"], - } - if peak - else None - ) - return base - - async def _async_update_data(self): - """Fetch pollen data and extract sensors for current day and forecast.""" - url = "https://pollen.googleapis.com/v1/forecast:lookup" - params = { - "key": self.api_key, - "location.latitude": f"{self.lat:.6f}", - "location.longitude": f"{self.lon:.6f}", - "days": self.forecast_days, - } - if self.language: - params["languageCode"] = self.language - - # SECURITY: Do not log request parameters (avoid coords/key leakage) + if _LOGGER.isEnabledFor(logging.DEBUG): + ids = [getattr(s, "unique_id", None) for s in sensors] + 
preview = ids[:10] + extra = max(0, len(ids) - len(preview)) + suffix = f", +{extra} more" if extra else "" _LOGGER.debug( - "Fetching forecast (days=%s, lang_set=%s)", - self.forecast_days, - bool(self.language), + "Creating %d sensors (preview=%s%s)", + len(ids), + preview, + suffix, ) - - # --- Minimal, safe retry policy (single retry) ----------------------- - max_retries = 1 # Keep it minimal to reduce cost/latency - for attempt in range(0, max_retries + 1): - try: - # Explicit total timeout for network call - async with self._session.get( - url, params=params, timeout=aiohttp.ClientTimeout(total=10) - ) as resp: - # Non-retryable auth logic first - if resp.status == 403: - raise ConfigEntryAuthFailed("Invalid API key") - - # 429: may be transient — respect Retry-After if present - if resp.status == 429: - if attempt < max_retries: - retry_after_raw = resp.headers.get("Retry-After") - delay = 2.0 - if retry_after_raw: - try: - delay = float(retry_after_raw) - except (TypeError, ValueError): - delay = 2.0 - # Cap delay and add small jitter to avoid herding - delay = min(delay, 5.0) + random.uniform(0.0, 0.4) - _LOGGER.warning( - "Pollen API 429 — retrying in %.2fs (attempt %d/%d)", - delay, - attempt + 1, - max_retries, - ) - await asyncio.sleep(delay) - continue - raise UpdateFailed("Quota exceeded") - - # 5xx -> retry once with short backoff - if 500 <= resp.status <= 599: - if attempt < max_retries: - delay = 0.8 * (2**attempt) + random.uniform(0.0, 0.3) - _LOGGER.warning( - "Pollen API HTTP %s — retrying in %.2fs (attempt %d/%d)", - resp.status, - delay, - attempt + 1, - max_retries, - ) - await asyncio.sleep(delay) - continue - raise UpdateFailed(f"HTTP {resp.status}") - - # Other 4xx (client errors except 403/429) are not retried - if 400 <= resp.status < 500 and resp.status not in (403, 429): - raise UpdateFailed(f"HTTP {resp.status}") - - if resp.status != 200: - raise UpdateFailed(f"HTTP {resp.status}") - - payload = await resp.json() - break # exit 
retry loop on success - - except ConfigEntryAuthFailed: - raise - except TimeoutError as err: - # Catch built-in TimeoutError; on modern Python (3.11+) this also - # covers asyncio.TimeoutError. - if attempt < max_retries: - delay = 0.8 * (2**attempt) + random.uniform(0.0, 0.3) - _LOGGER.warning( - "Pollen API timeout — retrying in %.2fs (attempt %d/%d)", - delay, - attempt + 1, - max_retries, - ) - await asyncio.sleep(delay) - continue - msg = redact_api_key(err, self.api_key) - if not msg: - msg = "Google Pollen API call timed out" - raise UpdateFailed(f"Timeout: {msg}") from err - - except aiohttp.ClientError as err: - # Transient client-side issues (DNS reset, connector errors, etc.) - if attempt < max_retries: - delay = 0.8 * (2**attempt) + random.uniform(0.0, 0.3) - _LOGGER.warning( - "Network error to Pollen API — retrying in %.2fs (attempt %d/%d)", - delay, - attempt + 1, - max_retries, - ) - await asyncio.sleep(delay) - continue - msg = redact_api_key(err, self.api_key) - if not msg: - msg = "Network error while calling the Google Pollen API" - raise UpdateFailed(msg) from err - - except Exception as err: # Keep previous behavior for unexpected errors - msg = redact_api_key(err, self.api_key) - _LOGGER.error("Pollen API error: %s", msg) - raise UpdateFailed(msg) from err - # -------------------------------------------------------------------- - - new_data: dict[str, dict] = {} - - # region - if region := payload.get("regionCode"): - new_data["region"] = {"source": "meta", "value": region} - - daily: list[dict] = payload.get("dailyInfo") or [] - if not daily: - self.data = new_data - self.last_updated = dt_util.utcnow() - return self.data - - # date (today) - first_day = daily[0] - date_obj = first_day.get("date", {}) or {} - if all(k in date_obj for k in ("year", "month", "day")): - new_data["date"] = { - "source": "meta", - "value": f"{date_obj['year']:04d}-{date_obj['month']:02d}-{date_obj['day']:02d}", - } - - # collect type codes found in any day - 
type_codes: set[str] = set() - for day in daily: - for item in day.get("pollenTypeInfo", []) or []: - code = (item.get("code") or "").upper() - if code: - type_codes.add(code) - - def _find_type(day: dict, code: str) -> dict | None: - """Find a pollen TYPE entry by code inside a day's 'pollenTypeInfo'.""" - for item in day.get("pollenTypeInfo", []) or []: - if (item.get("code") or "").upper() == code: - return item - return None - - def _find_plant(day: dict, code: str) -> dict | None: - """Find a PLANT entry by code inside a day's 'plantInfo'.""" - for item in day.get("plantInfo", []) or []: - if (item.get("code") or "") == code: - return item - return None - - # Current-day TYPES - for tcode in type_codes: - titem = _find_type(first_day, tcode) or {} - idx = (titem.get("indexInfo") or {}) if isinstance(titem, dict) else {} - rgb = _rgb_from_api(idx.get("color")) - key = f"type_{tcode.lower()}" - new_data[key] = { - "source": "type", - "value": idx.get("value"), - "category": idx.get("category"), - "displayName": titem.get("displayName", tcode), - "inSeason": titem.get("inSeason"), - "description": idx.get("indexDescription"), - "advice": titem.get("healthRecommendations"), - "color_hex": _rgb_to_hex_triplet(rgb), - "color_rgb": list(rgb) if rgb is not None else None, - "color_raw": ( - idx.get("color") if isinstance(idx.get("color"), dict) else None - ), - } - - # Current-day PLANTS - for pitem in first_day.get("plantInfo", []) or []: - code = pitem.get("code") - # Safety: skip plants without a stable 'code' to avoid duplicate 'plants_' keys - # and silent overwrites. This is robust and avoids creating unstable entities. 
- if not code: - continue - idx = pitem.get("indexInfo", {}) or {} - desc = pitem.get("plantDescription", {}) or {} - rgb = _rgb_from_api(idx.get("color")) - key = f"plants_{(code or '').lower()}" - new_data[key] = { - "source": "plant", - "value": idx.get("value"), - "category": idx.get("category"), - "displayName": pitem.get("displayName", code), - "code": code, - "inSeason": pitem.get("inSeason"), - "type": desc.get("type"), - "family": desc.get("family"), - "season": desc.get("season"), - "cross_reaction": desc.get("crossReaction"), - "description": idx.get("indexDescription"), - "advice": pitem.get("healthRecommendations"), - "color_hex": _rgb_to_hex_triplet(rgb), - "color_rgb": list(rgb) if rgb is not None else None, - "color_raw": ( - idx.get("color") if isinstance(idx.get("color"), dict) else None - ), - "picture": desc.get("picture"), - "picture_closeup": desc.get("pictureCloseup"), - } - - # Forecast for TYPES - def _extract_day_info(day: dict) -> tuple[str | None, dict | None]: - d = day.get("date") or {} - if not all(k in d for k in ("year", "month", "day")): - return None, None - return f"{d['year']:04d}-{d['month']:02d}-{d['day']:02d}", d - - for tcode in type_codes: - type_key = f"type_{tcode.lower()}" - existing = new_data.get(type_key) - needs_skeleton = not existing or ( - existing.get("source") == "type" - and existing.get("value") is None - and existing.get("category") is None - and existing.get("description") is None - ) - base = existing or {} - if needs_skeleton: - base = { - "source": "type", - "displayName": tcode, - "inSeason": None, - "advice": None, - "value": None, - "category": None, - "description": None, - "color_hex": None, - "color_rgb": None, - "color_raw": None, - } - - candidate = None - for day_data in daily: - candidate = _find_type(day_data, tcode) - if isinstance(candidate, dict): - base["displayName"] = candidate.get("displayName", tcode) - base["inSeason"] = candidate.get("inSeason") - base["advice"] = 
candidate.get("healthRecommendations") - break - forecast_list: list[dict[str, Any]] = [] - for offset, day in enumerate(daily[1:], start=1): - if offset >= self.forecast_days: - break - date_str, _ = _extract_day_info(day) - item = _find_type(day, tcode) or {} - idx = item.get("indexInfo") if isinstance(item, dict) else None - has_index = isinstance(idx, dict) - rgb = _rgb_from_api(idx.get("color")) if has_index else None - forecast_list.append( - { - "offset": offset, - "date": date_str, - "has_index": has_index, - "value": idx.get("value") if has_index else None, - "category": idx.get("category") if has_index else None, - "description": ( - idx.get("indexDescription") if has_index else None - ), - "color_hex": _rgb_to_hex_triplet(rgb) if has_index else None, - "color_rgb": ( - list(rgb) if (has_index and rgb is not None) else None - ), - "color_raw": ( - idx.get("color") - if has_index and isinstance(idx.get("color"), dict) - else None - ), - } - ) - # Attach common forecast attributes (convenience, trend, expected_peak) - base = self._process_forecast_attributes(base, forecast_list) - new_data[type_key] = base - - # Optional per-day sensors (only if requested and day exists) - def _add_day_sensor( - off: int, - *, - _forecast_list=forecast_list, - _base=base, - _tcode=tcode, - _type_key=type_key, - ) -> None: - """Create a per-day type sensor for a given offset.""" - f = next((d for d in _forecast_list if d["offset"] == off), None) - if not f: - return - - # Use day-specific 'inSeason' and 'advice' from the forecast day. 
- try: - day_obj = daily[off] - except (IndexError, TypeError): - day_obj = None - day_item = _find_type(day_obj, _tcode) if day_obj else None - day_in_season = ( - day_item.get("inSeason") if isinstance(day_item, dict) else None - ) - day_advice = ( - day_item.get("healthRecommendations") - if isinstance(day_item, dict) - else None - ) - - dname = f"{_base.get('displayName', _tcode)} (D+{off})" - new_data[f"{_type_key}_d{off}"] = { - "source": "type", - "displayName": dname, - "value": f.get("value") if f.get("has_index") else None, - "category": f.get("category") if f.get("has_index") else None, - "description": f.get("description") if f.get("has_index") else None, - "inSeason": day_in_season, - "advice": day_advice, - "color_hex": f.get("color_hex"), - "color_rgb": f.get("color_rgb"), - "color_raw": f.get("color_raw"), - "date": f.get("date"), - "has_index": f.get("has_index"), - } - - if self.create_d1: - _add_day_sensor(1) - if self.create_d2: - _add_day_sensor(2) - - # Forecast for PLANTS (attributes only; no per-day plant sensors) - for key, base in list(new_data.items()): - if base.get("source") != "plant": - continue - pcode = base.get("code") - if not pcode: - # Safety: skip if for some reason code is missing - continue - - forecast_list: list[dict[str, Any]] = [] - for offset, day in enumerate(daily[1:], start=1): - if offset >= self.forecast_days: - break - date_str, _ = _extract_day_info(day) - item = _find_plant(day, pcode) or {} - idx = item.get("indexInfo") if isinstance(item, dict) else None - has_index = isinstance(idx, dict) - rgb = _rgb_from_api(idx.get("color")) if has_index else None - forecast_list.append( - { - "offset": offset, - "date": date_str, - "has_index": has_index, - "value": idx.get("value") if has_index else None, - "category": idx.get("category") if has_index else None, - "description": ( - idx.get("indexDescription") if has_index else None - ), - "color_hex": _rgb_to_hex_triplet(rgb) if has_index else None, - "color_rgb": ( - 
list(rgb) if (has_index and rgb is not None) else None - ), - "color_raw": ( - idx.get("color") - if has_index and isinstance(idx.get("color"), dict) - else None - ), - } - ) - - # Attach common forecast attributes (convenience, trend, expected_peak) - base = self._process_forecast_attributes(base, forecast_list) - new_data[key] = base - - self.data = new_data - self.last_updated = dt_util.utcnow() - _LOGGER.debug("Updated data: %s", self.data) - return self.data + async_add_entities(sensors, True) class PollenSensor(CoordinatorEntity, SensorEntity): @@ -836,7 +271,7 @@ def extra_state_attributes(self): attrs = { "category": info.get("category"), # Always include explicit public attribution on all pollen sensors. - ATTR_ATTRIBUTION: "Data provided by Google Maps Pollen API", + ATTR_ATTRIBUTION: ATTRIBUTION, } for k in ( @@ -845,7 +280,6 @@ def extra_state_attributes(self): "advice", "color_hex", "color_rgb", - "color_raw", "date", "has_index", ): @@ -921,6 +355,14 @@ def device_info(self): """Return device info with translation support for the group.""" info = self.coordinator.data.get(self.code, {}) or {} group = info.get("source") + if not group: + if self.code.startswith("type_"): + group = "type" + elif self.code.startswith(("plant_", "plants_")): + group = "plant" + else: + group = "meta" + device_id = f"{self.coordinator.entry_id}_{group}" translation_keys = {"type": "types", "plant": "plants", "meta": "info"} translation_key = translation_keys.get(group, "info") @@ -969,7 +411,7 @@ def extra_state_attributes(self) -> dict[str, Any] | None: This mirrors PollenSensor's attribution so *all* sensors in this integration consistently show the data source. 
""" - return {ATTR_ATTRIBUTION: "Data provided by Google Maps Pollen API"} + return {ATTR_ATTRIBUTION: ATTRIBUTION} class RegionSensor(_BaseMetaSensor): diff --git a/custom_components/pollenlevels/services.yaml b/custom_components/pollenlevels/services.yaml index 0e2b22df..1d590a46 100644 --- a/custom_components/pollenlevels/services.yaml +++ b/custom_components/pollenlevels/services.yaml @@ -3,4 +3,6 @@ # Exposing a name improves discoverability in Developer Tools. force_update: + name: Force Update + description: Manually refresh pollen data for all configured locations. fields: {} diff --git a/custom_components/pollenlevels/translations/ca.json b/custom_components/pollenlevels/translations/ca.json index a2c68eca..4b3f97d2 100644 --- a/custom_components/pollenlevels/translations/ca.json +++ b/custom_components/pollenlevels/translations/ca.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Configuració de Nivells de pol·len", - "description": "Introdueix la teva clau d’API de Google, selecciona la teva ubicació al mapa, l’interval d’actualització (hores) i el codi d’idioma de la resposta de l’API.", + "description": "Introdueix la teva clau API de Google ([aconsegueix-la aquí]({api_key_url})) i revisa les bones pràctiques ([bones pràctiques]({restricting_api_keys_url})). Selecciona la ubicació al mapa, l’interval d’actualització (hores) i el codi d’idioma de la resposta de l’API. 
També pots definir els dies de previsió i l'abast dels sensors per dia (TIPUS).", "data": { "api_key": "Clau API", "name": "Nom", "location": "Ubicació", "update_interval": "Interval d’actualització (hores)", - "language_code": "Codi d’idioma de la resposta de l’API" + "language_code": "Codi d’idioma de la resposta de l’API", + "forecast_days": "Dies de previsió (1–5)", + "create_forecast_sensors": "Abast dels sensors per dia (TIPUS)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Clau API no vàlida", + "invalid_auth": "Clau API no vàlida\n\n{error_message}", "cannot_connect": "No es pot connectar al servei de pol·len.\n\n{error_message}", - "quota_exceeded": "Quota excedida", - "invalid_language": "Codi d’idioma no vàlid", + "quota_exceeded": "Quota excedida\n\n{error_message}", "invalid_language_format": "Utilitza un codi BCP-47 canònic com \"en\" o \"es-ES\".", "empty": "Aquest camp no pot estar buit", "invalid_option_combo": "Augmenta els 'Dies de previsió' per cobrir els sensors per dia seleccionats.", "invalid_coordinates": "Selecciona una ubicació vàlida al mapa.", - "unknown": "Error desconegut" + "unknown": "Error desconegut", + "invalid_update_interval": "L’interval d’actualització ha d’estar entre 1 i 24 hores.", + "invalid_forecast_days": "Els dies de previsió han d’estar entre 1 i 5." 
}, "abort": { "already_configured": "Aquesta ubicació ja està configurada.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opcions", - "description": "Canvia l’interval d’actualització, l’idioma de resposta de l’API, els dies de previsió i els sensors per dia per a {title}.\nOpcions de sensors per dia (TIPUS): Només avui (none), Fins demà (D+1), Fins demà passat (D+2).", + "description": "Canvia l’interval d’actualització, l’idioma de resposta de l’API, els dies de previsió i els sensors per dia per a {title}.\nOpcions de sensors per dia (TIPUS): Només avui (none), Fins demà (D+1), Fins demà passat (D+2; crea tant els sensors D+1 com D+2).", "data": { "update_interval": "Interval d’actualització (hores)", "language_code": "Codi d’idioma de la resposta de l’API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Clau API no vàlida", - "cannot_connect": "No es pot connectar al servei de pol·len.", - "quota_exceeded": "Quota excedida", - "invalid_language": "Codi d’idioma no vàlid", "invalid_language_format": "Utilitza un codi BCP-47 canònic com \"en\" o \"es-ES\".", "empty": "Aquest camp no pot estar buit", "invalid_option_combo": "Augmenta els 'Dies de previsió' per cobrir els sensors per dia seleccionats.", - "unknown": "Error desconegut" + "unknown": "Error desconegut", + "invalid_update_interval": "L’interval d’actualització ha d’estar entre 1 i 24 hores.", + "invalid_forecast_days": "Els dies de previsió han d’estar entre 1 i 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/cs.json b/custom_components/pollenlevels/translations/cs.json index 35c3b65a..1d451730 100644 --- a/custom_components/pollenlevels/translations/cs.json +++ b/custom_components/pollenlevels/translations/cs.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Konfigurace úrovní pylu", - "description": "Zadejte svůj klíč Google API, vyberte svou polohu na mapě, interval aktualizace (hodiny) a kód jazyka pro odpověď API.", + "description": "Zadejte svůj Google API klíč ([získejte jej zde]({api_key_url})) a přečtěte si doporučené postupy ([doporučené postupy]({restricting_api_keys_url})). Vyberte polohu na mapě, interval aktualizace (v hodinách) a jazykový kód odpovědi API. Můžete také nastavit dny předpovědi a rozsah senzorů po dnech (TYPY).", "data": { "api_key": "Klíč API", "name": "Název", "location": "Poloha", "update_interval": "Interval aktualizace (hodiny)", - "language_code": "Kód jazyka odpovědi API" + "language_code": "Kód jazyka odpovědi API", + "forecast_days": "Dny předpovědi (1–5)", + "create_forecast_sensors": "Rozsah senzorů po dnech (TYPY)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Neplatný klíč API", - "cannot_connect": "Nelze se připojit ke službě", - "quota_exceeded": "Překročena kvóta", - "invalid_language": "Neplatný kód jazyka", + "invalid_auth": "Neplatný klíč API\n\n{error_message}", + "cannot_connect": "Nelze se připojit ke službě\n\n{error_message}", + "quota_exceeded": "Překročena kvóta\n\n{error_message}", "invalid_language_format": "Použijte kanonický kód BCP-47, například \"en\" nebo \"es-ES\".", "empty": "Toto pole nemůže být prázdné", "invalid_option_combo": "Zvyšte „Dny předpovědi“, aby pokryly vybrané senzory po dnech.", "invalid_coordinates": "Vyberte platné umístění na mapě.", - "unknown": "Neznámá chyba" + "unknown": "Neznámá chyba", + "invalid_update_interval": "Interval aktualizace musí být mezi 1 a 24 hodinami.", + 
"invalid_forecast_days": "Dny předpovědi musí být v rozmezí 1–5." }, "abort": { "already_configured": "Toto umístění je již nakonfigurováno.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Možnosti", - "description": "Změňte interval aktualizace, jazyk API, dny předpovědi a senzory po dnech pro {title}.\nMožnosti senzorů po dnech (TYPY): Pouze dnes (none), Do zítra (D+1), Do pozítří (D+2).", + "description": "Změňte interval aktualizace, jazyk API, dny předpovědi a senzory po dnech pro {title}.\nMožnosti senzorů po dnech (TYPY): Pouze dnes (none), Do zítra (D+1), Do pozítří (D+2; vytvoří senzory D+1 i D+2).", "data": { "update_interval": "Interval aktualizace (hodiny)", "language_code": "Kód jazyka odpovědi API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Neplatný klíč API", - "cannot_connect": "Nelze se připojit ke službě", - "quota_exceeded": "Překročena kvóta", - "invalid_language": "Neplatný kód jazyka", "invalid_language_format": "Použijte kanonický kód BCP-47, například \"en\" nebo \"es-ES\".", "empty": "Toto pole nemůže být prázdné", "invalid_option_combo": "Zvyšte „Dny předpovědi“, aby pokryly vybrané senzory po dnech.", - "unknown": "Neznámá chyba" + "unknown": "Neznámá chyba", + "invalid_update_interval": "Interval aktualizace musí být mezi 1 a 24 hodinami.", + "invalid_forecast_days": "Dny předpovědi musí být v rozmezí 1–5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/da.json b/custom_components/pollenlevels/translations/da.json index 3cfc6ceb..2ddc2d76 100644 --- a/custom_components/pollenlevels/translations/da.json +++ b/custom_components/pollenlevels/translations/da.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Konfiguration af pollenniveauer", - "description": "Angiv din Google API-nøgle, vælg din placering på kortet, opdateringsinterval (timer) og sprogkode for API-svar.", + "description": "Indtast din Google API-nøgle ([hent den her]({api_key_url})) og læs bedste praksis ([bedste praksis]({restricting_api_keys_url})). Vælg din placering på kortet, opdateringsinterval (timer) og sprogkode for API-svaret. Du kan også angive prognosedage og omfanget af sensorer pr. dag (TYPER).", "data": { "api_key": "API-nøgle", "name": "Navn", "location": "Placering", "update_interval": "Opdateringsinterval (timer)", - "language_code": "Sprogkode for API-svar" + "language_code": "Sprogkode for API-svar", + "forecast_days": "Prognosedage (1–5)", + "create_forecast_sensors": "Omfang af sensorer pr. dag (TYPER)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Ugyldig API-nøgle", - "cannot_connect": "Kan ikke oprette forbindelse til tjenesten", - "quota_exceeded": "Kvote overskredet", - "invalid_language": "Ugyldig sprogkode", + "invalid_auth": "Ugyldig API-nøgle\n\n{error_message}", + "cannot_connect": "Kan ikke oprette forbindelse til tjenesten\n\n{error_message}", + "quota_exceeded": "Kvote overskredet\n\n{error_message}", "invalid_language_format": "Brug en kanonisk BCP-47-kode som \"en\" eller \"es-ES\".", "empty": "Dette felt må ikke være tomt", "invalid_option_combo": "Forøg \"Prognosedage\" for at dække valgte sensorer pr. 
dag.", "invalid_coordinates": "Vælg en gyldig placering på kortet.", - "unknown": "Ukendt fejl" + "unknown": "Ukendt fejl", + "invalid_update_interval": "Opdateringsintervallet skal være mellem 1 og 24 timer.", + "invalid_forecast_days": "Prognosedage skal være mellem 1 og 5." }, "abort": { "already_configured": "Denne placering er allerede konfigureret.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Indstillinger", - "description": "Skift opdateringsinterval, API-sprog, prognosedage og sensorer pr. dag for {title}.\nIndstillinger for sensorer pr. dag (TYPER): Kun i dag (none), Til og med i morgen (D+1), Til og med overmorgen (D+2).", + "description": "Skift opdateringsinterval, API-sprog, prognosedage og sensorer pr. dag for {title}.\nIndstillinger for sensorer pr. dag (TYPER): Kun i dag (none), Til og med i morgen (D+1), Til og med overmorgen (D+2; opretter både D+1- og D+2-sensorer).", "data": { "update_interval": "Opdateringsinterval (timer)", "language_code": "Sprogkode for API-svar", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Ugyldig API-nøgle", - "cannot_connect": "Kan ikke oprette forbindelse til tjenesten", - "quota_exceeded": "Kvote overskredet", - "invalid_language": "Ugyldig sprogkode", "invalid_language_format": "Brug en kanonisk BCP-47-kode som \"en\" eller \"es-ES\".", "empty": "Dette felt må ikke være tomt", "invalid_option_combo": "Forøg \"Prognosedage\" for at dække valgte sensorer pr. dag.", - "unknown": "Ukendt fejl" + "unknown": "Ukendt fejl", + "invalid_update_interval": "Opdateringsintervallet skal være mellem 1 og 24 timer.", + "invalid_forecast_days": "Prognosedage skal være mellem 1 og 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/de.json b/custom_components/pollenlevels/translations/de.json index b260013c..70b8a0d2 100644 --- a/custom_components/pollenlevels/translations/de.json +++ b/custom_components/pollenlevels/translations/de.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Pollen Levels – Konfiguration", - "description": "Gib deinen Google API-Schlüssel an, wähle deinen Standort auf der Karte, das Aktualisierungsintervall (Stunden) und den Sprachcode für die API-Antwort.", + "description": "Gib deinen Google API-Schlüssel ein ([hier abrufen]({api_key_url})) und lies die Best Practices ([Best Practices]({restricting_api_keys_url})). Wähle deinen Standort auf der Karte, das Aktualisierungsintervall (Stunden) und den Sprachcode der API-Antwort. Sie können auch Vorhersagetage und den Umfang der Tagessensoren (TYPEN) festlegen.", "data": { "api_key": "API-Schlüssel", "name": "Name", "location": "Standort", "update_interval": "Aktualisierungsintervall (Stunden)", - "language_code": "Sprachcode für die API-Antwort" + "language_code": "Sprachcode für die API-Antwort", + "forecast_days": "Vorhersagetage (1–5)", + "create_forecast_sensors": "Bereich der Tagessensoren (TYPEN)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Ungültiger API-Schlüssel", - "cannot_connect": "Verbindung zum Dienst fehlgeschlagen", - "quota_exceeded": "Kontingent überschritten", - "invalid_language": "Ungültiger Sprachcode", + "invalid_auth": "Ungültiger API-Schlüssel\n\n{error_message}", + "cannot_connect": "Verbindung zum Dienst fehlgeschlagen\n\n{error_message}", + "quota_exceeded": "Kontingent überschritten\n\n{error_message}", "invalid_language_format": "Verwenden Sie einen kanonischen BCP-47-Code wie \"en\" oder \"es-ES\".", "empty": "Dieses Feld darf nicht leer sein", "invalid_option_combo": "Erhöhe 'Vorhersagetage', um die gewählten Tagessensoren abzudecken.", "invalid_coordinates": "Wähle einen 
gültigen Standort auf der Karte aus.", - "unknown": "Unbekannter Fehler" + "unknown": "Unbekannter Fehler", + "invalid_update_interval": "Das Aktualisierungsintervall muss zwischen 1 und 24 Stunden liegen.", + "invalid_forecast_days": "Vorhersagetage müssen zwischen 1 und 5 liegen." }, "abort": { "already_configured": "Dieser Standort ist bereits konfiguriert.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Optionen", - "description": "Ändere Aktualisierungsintervall, API-Sprache, Vorhersagetage und Tagessensoren für {title}.\nOptionen für Tagessensoren (TYPEN): Nur heute (none), Bis morgen (D+1), Bis übermorgen (D+2).", + "description": "Ändere Aktualisierungsintervall, API-Sprache, Vorhersagetage und Tagessensoren für {title}.\nOptionen für Tagessensoren (TYPEN): Nur heute (none), Bis morgen (D+1), Bis übermorgen (D+2; erstellt sowohl D+1- als auch D+2-Sensoren).", "data": { "update_interval": "Aktualisierungsintervall (Stunden)", "language_code": "Sprachcode für die API-Antwort", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Ungültiger API-Schlüssel", - "cannot_connect": "Verbindung zum Dienst fehlgeschlagen", - "quota_exceeded": "Kontingent überschritten", - "invalid_language": "Ungültiger Sprachcode", "invalid_language_format": "Verwenden Sie einen kanonischen BCP-47-Code wie \"en\" oder \"es-ES\".", "empty": "Dieses Feld darf nicht leer sein", "invalid_option_combo": "Erhöhe 'Vorhersagetage', um die gewählten Tagessensoren abzudecken.", - "unknown": "Unbekannter Fehler" + "unknown": "Unbekannter Fehler", + "invalid_update_interval": "Das Aktualisierungsintervall muss zwischen 1 und 24 Stunden liegen.", + "invalid_forecast_days": "Vorhersagetage müssen zwischen 1 und 5 liegen." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/en.json b/custom_components/pollenlevels/translations/en.json index dffcc70a..a4750573 100644 --- a/custom_components/pollenlevels/translations/en.json +++ b/custom_components/pollenlevels/translations/en.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Pollen Levels Configuration", - "description": "Enter your Google API Key, select your location on the map, update interval (hours) and API response language code.", + "description": "Enter your Google API Key ([get it here]({api_key_url})) and review best practices ([best practices]({restricting_api_keys_url})). Select your location on the map, update interval (hours) and API response language code. You can also set forecast days and per-day TYPE sensor range.", "data": { "api_key": "API Key", "name": "Name", "location": "Location", "update_interval": "Update interval (hours)", - "language_code": "API response language code" + "language_code": "API response language code", + "forecast_days": "Forecast days (1–5)", + "create_forecast_sensors": "Per-day TYPE sensors range" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Invalid API key", + "invalid_auth": "Invalid API key\n\n{error_message}", "cannot_connect": "Unable to connect to the pollen service.\n\n{error_message}", - "quota_exceeded": "Quota exceeded", - "invalid_language": "Invalid language code", + "quota_exceeded": "Quota exceeded\n\n{error_message}", "invalid_language_format": "Use a canonical BCP-47 code such as \"en\" or \"es-ES\".", "empty": "This field cannot be empty", "invalid_option_combo": "Increase 'Forecast days' to cover selected per-day sensors.", "invalid_coordinates": "Please select a valid location on the map.", - "unknown": "Unknown error" + "unknown": "Unknown error", + "invalid_update_interval": "Update interval must be between 1 and 24 hours.", + "invalid_forecast_days": "Forecast days must be between 1 and 5." 
}, "abort": { "already_configured": "This location is already configured.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Options", - "description": "Change the update interval, API language, forecast days and per-day TYPE sensors for {title}.\nOptions for per-day TYPE sensors: Only today (none), Through tomorrow (D+1), Through day after tomorrow (D+2).", + "description": "Change the update interval, API language, forecast days and per-day TYPE sensors for {title}.\nOptions for per-day TYPE sensors: Only today (none), Through tomorrow (D+1), Through day after tomorrow (D+2; creates both D+1 and D+2 sensors).", "data": { "update_interval": "Update interval (hours)", "language_code": "API response language code", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Invalid API key", - "cannot_connect": "Unable to connect to the pollen service.", - "quota_exceeded": "Quota exceeded", - "invalid_language": "Invalid language code", "invalid_language_format": "Use a canonical BCP-47 code such as \"en\" or \"es-ES\".", "empty": "This field cannot be empty", "invalid_option_combo": "Increase 'Forecast days' to cover selected per-day sensors.", - "unknown": "Unknown error" + "unknown": "Unknown error", + "invalid_update_interval": "Update interval must be between 1 and 24 hours.", + "invalid_forecast_days": "Forecast days must be between 1 and 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/es.json b/custom_components/pollenlevels/translations/es.json index ebd1e46c..6dc960d0 100644 --- a/custom_components/pollenlevels/translations/es.json +++ b/custom_components/pollenlevels/translations/es.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Configuración de Niveles de Polen", - "description": "Introduce tu clave de API de Google, selecciona tu ubicación en el mapa, el intervalo de actualización (horas) y el código de idioma de la respuesta de la API.", + "description": "Introduce tu clave API de Google ([consíguela aquí]({api_key_url})) y revisa las buenas prácticas ([buenas prácticas]({restricting_api_keys_url})). Selecciona tu ubicación en el mapa, el intervalo de actualización (horas) y el código de idioma de la respuesta de la API. También puedes configurar los días de previsión y el alcance de los sensores por día (TIPOS).", "data": { "api_key": "Clave API", "name": "Nombre", "location": "Ubicación", "update_interval": "Intervalo de actualización (horas)", - "language_code": "Código de idioma de la respuesta de la API" + "language_code": "Código de idioma de la respuesta de la API", + "forecast_days": "Días de previsión (1–5)", + "create_forecast_sensors": "Alcance de sensores por día (TIPOS)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Clave API inválida", + "invalid_auth": "Clave API inválida\n\n{error_message}", "cannot_connect": "No se puede conectar al servicio de polen.\n\n{error_message}", - "quota_exceeded": "Cuota excedida", - "invalid_language": "Código de idioma no válido", + "quota_exceeded": "Cuota excedida\n\n{error_message}", "invalid_language_format": "Usa un código BCP-47 canónico como \"en\" o \"es-ES\".", "empty": "Este campo no puede estar vacío", "invalid_option_combo": "Aumenta 'Días de previsión' para cubrir los sensores por día seleccionados.", "invalid_coordinates": "Selecciona una ubicación válida en el 
mapa.", - "unknown": "Error desconocido" + "unknown": "Error desconocido", + "invalid_update_interval": "El intervalo de actualización debe estar entre 1 y 24 horas.", + "invalid_forecast_days": "Los días de previsión deben estar entre 1 y 5." }, "abort": { "already_configured": "Esta ubicación ya está configurada.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opciones", - "description": "Cambia el intervalo de actualización, el idioma de respuesta de la API, los días de previsión y los sensores por día para {title}.\nOpciones de sensores por día (TIPOS): Solo hoy (none), Hasta mañana (D+1), Hasta pasado mañana (D+2).", + "description": "Cambia el intervalo de actualización, el idioma de respuesta de la API, los días de previsión y los sensores por día para {title}.\nOpciones de sensores por día (TIPOS): Solo hoy (none), Hasta mañana (D+1), Hasta pasado mañana (D+2; crea sensores D+1 y D+2).", "data": { "update_interval": "Intervalo de actualización (horas)", "language_code": "Código de idioma de la respuesta de la API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Clave API inválida", - "cannot_connect": "No se puede conectar al servicio de polen.", - "quota_exceeded": "Cuota excedida", - "invalid_language": "Código de idioma no válido", "invalid_language_format": "Usa un código BCP-47 canónico como \"en\" o \"es-ES\".", "empty": "Este campo no puede estar vacío", "invalid_option_combo": "Aumenta 'Días de previsión' para cubrir los sensores por día seleccionados.", - "unknown": "Error desconocido" + "unknown": "Error desconocido", + "invalid_update_interval": "El intervalo de actualización debe estar entre 1 y 24 horas.", + "invalid_forecast_days": "Los días de previsión deben estar entre 1 y 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/fi.json b/custom_components/pollenlevels/translations/fi.json index 071dc24c..d1d396ac 100644 --- a/custom_components/pollenlevels/translations/fi.json +++ b/custom_components/pollenlevels/translations/fi.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Siitepölytason asetukset", - "description": "Anna Google API -avaimesi, valitse sijaintisi kartalta, päivitysväli (tunnit) ja API-vastauksen kielikoodi.", + "description": "Syötä Google API -avaimesi ([hanki se täältä]({api_key_url})) ja tutustu parhaisiin käytäntöihin ([parhaat käytännöt]({restricting_api_keys_url})). Valitse sijainti kartalta, päivitysväli (tunteina) ja API-vastauksen kielikoodi. Voit myös määrittää ennustepäivät ja päiväsensorien laajuuden (TYYPIT).", "data": { "api_key": "API-avain", "name": "Nimi", "location": "Sijainti", "update_interval": "Päivitysväli (tunnit)", - "language_code": "API-vastauksen kielikoodi" + "language_code": "API-vastauksen kielikoodi", + "forecast_days": "Ennustepäivät (1–5)", + "create_forecast_sensors": "Päiväsensorien laajuus (TYYPIT)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Virheellinen API-avain", - "cannot_connect": "Palveluun ei saada yhteyttä", - "quota_exceeded": "Kiintiö ylitetty", - "invalid_language": "Virheellinen kielikoodi", + "invalid_auth": "Virheellinen API-avain\n\n{error_message}", + "cannot_connect": "Palveluun ei saada yhteyttä\n\n{error_message}", + "quota_exceeded": "Kiintiö ylitetty\n\n{error_message}", "invalid_language_format": "Käytä kanonista BCP-47-koodia, esimerkiksi \"en\" tai \"es-ES\".", "empty": "Tämä kenttä ei voi olla tyhjä", "invalid_option_combo": "Lisää \"Ennustepäiviä\", jotta valitut päiväsensorit katetaan.", "invalid_coordinates": "Valitse kartalta kelvollinen sijainti.", - "unknown": "Tuntematon virhe" + "unknown": "Tuntematon virhe", + "invalid_update_interval": "Päivitysvälin on oltava 1–24 tuntia.", + 
"invalid_forecast_days": "Ennustepäivien on oltava välillä 1–5." }, "abort": { "already_configured": "Tämä sijainti on jo määritetty.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Asetukset", - "description": "Muuta päivitysväliä, API-kieltä, ennustepäiviä ja päiväsensoreita TYYPEILLE kohteelle {title}.\nPäiväsensorien vaihtoehdot (TYYPIT): Vain tänään (none), Huomiseen asti (D+1), Ylihuomiseen asti (D+2).", + "description": "Muuta päivitysväliä, API-kieltä, ennustepäiviä ja päiväsensoreita TYYPEILLE kohteelle {title}.\nPäiväsensorien vaihtoehdot (TYYPIT): Vain tänään (none), Huomiseen asti (D+1), Ylihuomiseen asti (D+2; luo sekä D+1- että D+2-sensorit).", "data": { "update_interval": "Päivitysväli (tunnit)", "language_code": "API-vastauksen kielikoodi", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Virheellinen API-avain", - "cannot_connect": "Palveluun ei saada yhteyttä", - "quota_exceeded": "Kiintiö ylitetty", - "invalid_language": "Virheellinen kielikoodi", "invalid_language_format": "Käytä kanonista BCP-47-koodia, esimerkiksi \"en\" tai \"es-ES\".", "empty": "Tämä kenttä ei voi olla tyhjä", "invalid_option_combo": "Lisää \"Ennustepäiviä\", jotta valitut päiväsensorit katetaan.", - "unknown": "Tuntematon virhe" + "unknown": "Tuntematon virhe", + "invalid_update_interval": "Päivitysvälin on oltava 1–24 tuntia.", + "invalid_forecast_days": "Ennustepäivien on oltava välillä 1–5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/fr.json b/custom_components/pollenlevels/translations/fr.json index cf55d0f4..2e31d471 100644 --- a/custom_components/pollenlevels/translations/fr.json +++ b/custom_components/pollenlevels/translations/fr.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Pollen Levels – Configuration", - "description": "Saisissez votre clé API Google, sélectionnez votre position sur la carte, l’intervalle de mise à jour (heures) et le code de langue pour la réponse de l’API.", + "description": "Saisissez votre clé API Google ([l’obtenir ici]({api_key_url})) et consultez les bonnes pratiques ([bonnes pratiques]({restricting_api_keys_url})). Sélectionnez votre emplacement sur la carte, l’intervalle de mise à jour (heures) et le code de langue de la réponse de l’API. Vous pouvez aussi définir les jours de prévision et la portée des capteurs par jour (TYPES).", "data": { "api_key": "Clé API", "name": "Nom", "location": "Emplacement", "update_interval": "Intervalle de mise à jour (heures)", - "language_code": "Code de langue pour la réponse de l’API" + "language_code": "Code de langue pour la réponse de l’API", + "forecast_days": "Jours de prévision (1–5)", + "create_forecast_sensors": "Portée des capteurs par jour (TYPES)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Clé API invalide", - "cannot_connect": "Impossible de se connecter au service", - "quota_exceeded": "Quota dépassé", - "invalid_language": "Code de langue invalide", + "invalid_auth": "Clé API invalide\n\n{error_message}", + "cannot_connect": "Impossible de se connecter au service\n\n{error_message}", + "quota_exceeded": "Quota dépassé\n\n{error_message}", "invalid_language_format": "Utilisez un code BCP-47 canonique tel que \"en\" ou \"es-ES\".", "empty": "Ce champ ne peut pas être vide", "invalid_option_combo": "Augmentez le nombre de « Jours de prévision » afin de couvrir les capteurs par jour sélectionnés.", 
"invalid_coordinates": "Sélectionnez un emplacement valide sur la carte.", - "unknown": "Erreur inconnue" + "unknown": "Erreur inconnue", + "invalid_update_interval": "L’intervalle de mise à jour doit être compris entre 1 et 24 heures.", + "invalid_forecast_days": "Les jours de prévision doivent être compris entre 1 et 5." }, "abort": { "already_configured": "Cet emplacement est déjà configuré.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Options", - "description": "Modifiez l’intervalle de mise à jour, la langue de l’API, les jours de prévision et les capteurs par jour pour {title}.\nOptions des capteurs par jour (TYPES) : Aujourd’hui uniquement (none), Jusqu’à demain (D+1), Jusqu’au surlendemain (D+2).", + "description": "Modifiez l’intervalle de mise à jour, la langue de l’API, les jours de prévision et les capteurs par jour pour {title}.\nOptions des capteurs par jour (TYPES) : Aujourd’hui uniquement (none), Jusqu’à demain (D+1), Jusqu’au surlendemain (D+2 ; crée les capteurs D+1 et D+2).", "data": { "update_interval": "Intervalle de mise à jour (heures)", "language_code": "Code de langue pour la réponse de l’API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Clé API invalide", - "cannot_connect": "Impossible de se connecter au service", - "quota_exceeded": "Quota dépassé", - "invalid_language": "Code de langue invalide", "invalid_language_format": "Utilisez un code BCP-47 canonique tel que \"en\" ou \"es-ES\".", "empty": "Ce champ ne peut pas être vide", "invalid_option_combo": "Augmentez le nombre de « Jours de prévision » afin de couvrir les capteurs par jour sélectionnés.", - "unknown": "Erreur inconnue" + "unknown": "Erreur inconnue", + "invalid_update_interval": "L’intervalle de mise à jour doit être compris entre 1 et 24 heures.", + "invalid_forecast_days": "Les jours de prévision doivent être compris entre 1 et 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/hu.json b/custom_components/pollenlevels/translations/hu.json index 0c551097..c6ac1d86 100644 --- a/custom_components/pollenlevels/translations/hu.json +++ b/custom_components/pollenlevels/translations/hu.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Pollen szintek – beállítás", - "description": "Adja meg a Google API-kulcsot, válassza ki helyszínét a térképen, a frissítési időközt (órák) és az API-válasz nyelvi kódját.", + "description": "Add meg a Google API-kulcsodat ([itt szerezhető be]({api_key_url})) és nézd át a bevált gyakorlatokat ([bevált gyakorlatok]({restricting_api_keys_url})). Válaszd ki a helyszínt a térképen, a frissítési időközt (órában) és az API-válasz nyelvi kódját. Beállíthatod az előrejelzési napokat és a napi szenzorok tartományát (TÍPUSOK).", "data": { "api_key": "API-kulcs", "name": "Név", "location": "Helyszín", "update_interval": "Frissítési időköz (óra)", - "language_code": "API-válasz nyelvi kódja" + "language_code": "API-válasz nyelvi kódja", + "forecast_days": "Előrejelzési napok (1–5)", + "create_forecast_sensors": "Napi TÍPUS szenzorok tartománya" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Érvénytelen API-kulcs", - "cannot_connect": "Nem lehet csatlakozni a szolgáltatáshoz", - "quota_exceeded": "Kvóta túllépve", - "invalid_language": "Érvénytelen nyelvi kód", + "invalid_auth": "Érvénytelen API-kulcs\n\n{error_message}", + "cannot_connect": "Nem lehet csatlakozni a szolgáltatáshoz\n\n{error_message}", + "quota_exceeded": "Kvóta túllépve\n\n{error_message}", "invalid_language_format": "Használjon kanonikus BCP-47 kódot, például \"en\" vagy \"es-ES\".", "empty": "A mező nem lehet üres", "invalid_option_combo": "Növeld a \"Előrejelzési napok\" értéket a kiválasztott napi szenzorok lefedéséhez.", "invalid_coordinates": "Válassz érvényes helyet a térképen.", - "unknown": "Ismeretlen hiba" + "unknown": "Ismeretlen hiba", + 
"invalid_update_interval": "A frissítési időköznek 1 és 24 óra között kell lennie.", + "invalid_forecast_days": "Az előrejelzési napoknak 1 és 5 között kell lenniük." }, "abort": { "already_configured": "Ez a hely már konfigurálva van.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Beállítások", - "description": "Módosítsd a frissítési időközt, az API nyelvét, az előrejelzési napokat és a napi TÍPUS szenzorokat a(z) {title} bejegyzéshez.\nNapi TÍPUS szenzorok: Csak ma (none), Holnapig (D+1), Holnaputánig (D+2).", + "description": "Módosítsd a frissítési időközt, az API nyelvét, az előrejelzési napokat és a napi TÍPUS szenzorokat a(z) {title} bejegyzéshez.\nNapi TÍPUS szenzorok: Csak ma (none), Holnapig (D+1), Holnaputánig (D+2; létrehozza a D+1 és D+2 szenzorokat is).", "data": { "update_interval": "Frissítési időköz (óra)", "language_code": "API-válasz nyelvi kódja", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Érvénytelen API-kulcs", - "cannot_connect": "Nem lehet csatlakozni a szolgáltatáshoz", - "quota_exceeded": "Kvóta túllépve", - "invalid_language": "Érvénytelen nyelvi kód", "invalid_language_format": "Használjon kanonikus BCP-47 kódot, például \"en\" vagy \"es-ES\".", "empty": "A mező nem lehet üres", "invalid_option_combo": "Növeld a \"Előrejelzési napok\" értéket a kiválasztott napi szenzorok lefedéséhez.", - "unknown": "Ismeretlen hiba" + "unknown": "Ismeretlen hiba", + "invalid_update_interval": "A frissítési időköznek 1 és 24 óra között kell lennie.", + "invalid_forecast_days": "Az előrejelzési napoknak 1 és 5 között kell lenniük." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/it.json b/custom_components/pollenlevels/translations/it.json index 8ad8b823..09ab015d 100644 --- a/custom_components/pollenlevels/translations/it.json +++ b/custom_components/pollenlevels/translations/it.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Configurazione Livelli di polline", - "description": "Inserisci la tua chiave API Google, seleziona la tua posizione sulla mappa, l’intervallo di aggiornamento (ore) e il codice lingua per la risposta dell'API.", + "description": "Inserisci la tua chiave API di Google ([ottienila qui]({api_key_url})) e consulta le best practice ([best practice]({restricting_api_keys_url})). Seleziona la posizione sulla mappa, l’intervallo di aggiornamento (ore) e il codice lingua della risposta dell’API. Puoi anche impostare i giorni di previsione e l'ambito dei sensori per giorno (TIPI).", "data": { "api_key": "Chiave API", "name": "Nome", "location": "Posizione", "update_interval": "Intervallo di aggiornamento (ore)", - "language_code": "Codice lingua per la risposta dell'API" + "language_code": "Codice lingua per la risposta dell'API", + "forecast_days": "Giorni di previsione (1–5)", + "create_forecast_sensors": "Ambito dei sensori per giorno (TIPI)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Chiave API non valida", - "cannot_connect": "Impossibile connettersi al servizio", - "quota_exceeded": "Quota superata", - "invalid_language": "Codice lingua non valido", + "invalid_auth": "Chiave API non valida\n\n{error_message}", + "cannot_connect": "Impossibile connettersi al servizio\n\n{error_message}", + "quota_exceeded": "Quota superata\n\n{error_message}", "invalid_language_format": "Usa un codice BCP-47 canonico come \"en\" o \"es-ES\".", "empty": "Questo campo non può essere vuoto", "invalid_option_combo": "Aumenta 'Giorni di previsione' per coprire i sensori giornalieri selezionati.", "invalid_coordinates": 
"Seleziona una posizione valida sulla mappa.", - "unknown": "Errore sconosciuto" + "unknown": "Errore sconosciuto", + "invalid_update_interval": "L’intervallo di aggiornamento deve essere compreso tra 1 e 24 ore.", + "invalid_forecast_days": "I giorni di previsione devono essere compresi tra 1 e 5." }, "abort": { "already_configured": "Questa posizione è già configurata.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opzioni", - "description": "Modifica l’intervallo di aggiornamento, la lingua della risposta dell’API, i giorni di previsione e i sensori giornalieri per i TIPI per {title}.\nOpzioni dei sensori giornalieri (TIPI): Solo oggi (none), Fino a domani (D+1), Fino a dopodomani (D+2).", + "description": "Modifica l’intervallo di aggiornamento, la lingua della risposta dell’API, i giorni di previsione e i sensori giornalieri per i TIPI per {title}.\nOpzioni dei sensori giornalieri (TIPI): Solo oggi (none), Fino a domani (D+1), Fino a dopodomani (D+2; crea sia i sensori D+1 che D+2).", "data": { "update_interval": "Intervallo di aggiornamento (ore)", "language_code": "Codice lingua per la risposta dell'API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Chiave API non valida", - "cannot_connect": "Impossibile connettersi al servizio", - "quota_exceeded": "Quota superata", - "invalid_language": "Codice lingua non valido", "invalid_language_format": "Usa un codice BCP-47 canonico come \"en\" o \"es-ES\".", "empty": "Questo campo non può essere vuoto", "invalid_option_combo": "Aumenta 'Giorni di previsione' per coprire i sensori giornalieri selezionati.", - "unknown": "Errore sconosciuto" + "unknown": "Errore sconosciuto", + "invalid_update_interval": "L’intervallo di aggiornamento deve essere compreso tra 1 e 24 ore.", + "invalid_forecast_days": "I giorni di previsione devono essere compresi tra 1 e 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/nb.json b/custom_components/pollenlevels/translations/nb.json index 3da54ef0..e082548f 100644 --- a/custom_components/pollenlevels/translations/nb.json +++ b/custom_components/pollenlevels/translations/nb.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Konfigurasjon av pollennivåer", - "description": "Angi Google API-nøkkel, velg posisjonen din på kartet, oppdateringsintervall (timer) og språkkode for API-svar.", + "description": "Oppgi Google API-nøkkelen din ([få den her]({api_key_url})) og les beste praksis ([beste praksis]({restricting_api_keys_url})). Velg posisjonen din på kartet, oppdateringsintervallet (timer) og språkkoden for API-svaret. Du kan også angi prognosedager og omfanget av sensorer per dag (TYPER).", "data": { "api_key": "API-nøkkel", "name": "Navn", "location": "Posisjon", "update_interval": "Oppdateringsintervall (timer)", - "language_code": "Språkkode for API-svar" + "language_code": "Språkkode for API-svar", + "forecast_days": "Prognosedager (1–5)", + "create_forecast_sensors": "Omfang av sensorer per dag (TYPER)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Ugyldig API-nøkkel", - "cannot_connect": "Kan ikke koble til tjenesten", - "quota_exceeded": "Kvote overskredet", - "invalid_language": "Ugyldig språkkode", + "invalid_auth": "Ugyldig API-nøkkel\n\n{error_message}", + "cannot_connect": "Kan ikke koble til tjenesten\n\n{error_message}", + "quota_exceeded": "Kvote overskredet\n\n{error_message}", "invalid_language_format": "Bruk en kanonisk BCP-47-kode som \"en\" eller \"es-ES\".", "empty": "Dette feltet kan ikke være tomt", "invalid_option_combo": "Øk «Prognosedager» for å dekke valgte sensorer per dag.", "invalid_coordinates": "Velg en gyldig posisjon på kartet.", - "unknown": "Ukjent feil" + "unknown": "Ukjent feil", + "invalid_update_interval": "Oppdateringsintervallet må være mellom 1 og 24 timer.", + 
"invalid_forecast_days": "Prognosedager må være mellom 1 og 5." }, "abort": { "already_configured": "Dette stedet er allerede konfigurert.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Innstillinger", - "description": "Endre oppdateringsintervall, API-språk, prognosedager og sensorer per dag for {title}.\nAlternativer for sensorer per dag (TYPER): Kun i dag (none), Til og med i morgen (D+1), Til og med i overmorgen (D+2).", + "description": "Endre oppdateringsintervall, API-språk, prognosedager og sensorer per dag for {title}.\nAlternativer for sensorer per dag (TYPER): Kun i dag (none), Til og med i morgen (D+1), Til og med i overmorgen (D+2; oppretter både D+1- og D+2-sensorer).", "data": { "update_interval": "Oppdateringsintervall (timer)", "language_code": "Språkkode for API-svar", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Ugyldig API-nøkkel", - "cannot_connect": "Kan ikke koble til tjenesten", - "quota_exceeded": "Kvote overskredet", - "invalid_language": "Ugyldig språkkode", "invalid_language_format": "Bruk en kanonisk BCP-47-kode som \"en\" eller \"es-ES\".", "empty": "Dette feltet kan ikke være tomt", "invalid_option_combo": "Øk «Prognosedager» for å dekke valgte sensorer per dag.", - "unknown": "Ukjent feil" + "unknown": "Ukjent feil", + "invalid_update_interval": "Oppdateringsintervallet må være mellom 1 og 24 timer.", + "invalid_forecast_days": "Prognosedager må være mellom 1 og 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/nl.json b/custom_components/pollenlevels/translations/nl.json index 7da90615..9759950a 100644 --- a/custom_components/pollenlevels/translations/nl.json +++ b/custom_components/pollenlevels/translations/nl.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Pollen Levels – Configuratie", - "description": "Voer je Google API-sleutel in, selecteer je locatie op de kaart, het update-interval (uren) en de taalcode voor de API-respons.", + "description": "Voer je Google API-sleutel in ([haal hem hier]({api_key_url})) en bekijk de best practices ([best practices]({restricting_api_keys_url})). Selecteer je locatie op de kaart, het update-interval (uren) en de taalcode van de API-respons. Je kunt ook voorspellingsdagen en het bereik van per-dag TYPE-sensoren instellen.", "data": { "api_key": "API-sleutel", "name": "Naam", "location": "Locatie", "update_interval": "Update-interval (uren)", - "language_code": "Taalcode voor API-respons" + "language_code": "Taalcode voor API-respons", + "forecast_days": "Voorspellingsdagen (1–5)", + "create_forecast_sensors": "Bereik van per-dag TYPE-sensoren" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Ongeldige API-sleutel", - "cannot_connect": "Kan geen verbinding maken met de service", - "quota_exceeded": "Limiet overschreden", - "invalid_language": "Ongeldige taalcode", + "invalid_auth": "Ongeldige API-sleutel\n\n{error_message}", + "cannot_connect": "Kan geen verbinding maken met de service\n\n{error_message}", + "quota_exceeded": "Limiet overschreden\n\n{error_message}", "invalid_language_format": "Gebruik een canonieke BCP-47-code zoals \"en\" of \"es-ES\".", "empty": "Dit veld mag niet leeg zijn", "invalid_option_combo": "Verhoog 'Voorspellingsdagen' om de geselecteerde per-dag sensoren te dekken.", "invalid_coordinates": "Selecteer een geldige locatie op de kaart.", - "unknown": "Onbekende fout" + "unknown": "Onbekende 
fout", + "invalid_update_interval": "Het update-interval moet tussen 1 en 24 uur liggen.", + "invalid_forecast_days": "Voorspellingsdagen moeten tussen 1 en 5 liggen." }, "abort": { "already_configured": "Deze locatie is al geconfigureerd.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opties", - "description": "Wijzig het update-interval, de API-taal, het aantal voorspellingsdagen en de per-dag TYPE-sensoren voor {title}.\nOpties voor per-dag TYPE-sensoren: Alleen vandaag (none), Tot en met morgen (D+1), Tot en met overmorgen (D+2).", + "description": "Wijzig het update-interval, de API-taal, het aantal voorspellingsdagen en de per-dag TYPE-sensoren voor {title}.\nOpties voor per-dag TYPE-sensoren: Alleen vandaag (none), Tot en met morgen (D+1), Tot en met overmorgen (D+2; maakt zowel D+1- als D+2-sensoren aan).", "data": { "update_interval": "Update-interval (uren)", "language_code": "Taalcode voor API-respons", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Ongeldige API-sleutel", - "cannot_connect": "Kan geen verbinding maken met de service", - "quota_exceeded": "Limiet overschreden", - "invalid_language": "Ongeldige taalcode", "invalid_language_format": "Gebruik een canonieke BCP-47-code zoals \"en\" of \"es-ES\".", "empty": "Dit veld mag niet leeg zijn", "invalid_option_combo": "Verhoog 'Voorspellingsdagen' om de geselecteerde per-dag sensoren te dekken.", - "unknown": "Onbekende fout" + "unknown": "Onbekende fout", + "invalid_update_interval": "Het update-interval moet tussen 1 en 24 uur liggen.", + "invalid_forecast_days": "Voorspellingsdagen moeten tussen 1 en 5 liggen." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/pl.json b/custom_components/pollenlevels/translations/pl.json index 8af09456..de2d5e11 100644 --- a/custom_components/pollenlevels/translations/pl.json +++ b/custom_components/pollenlevels/translations/pl.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Konfiguracja poziomów pyłku", - "description": "Wprowadź klucz Google API, wybierz swoją lokalizację na mapie, interwał aktualizacji (godziny) oraz kod języka odpowiedzi API.", + "description": "Wprowadź swój klucz Google API ([uzyskaj go tutaj]({api_key_url})) i zapoznaj się z dobrymi praktykami ([dobre praktyki]({restricting_api_keys_url})). Wybierz lokalizację na mapie, interwał aktualizacji (godziny) oraz kod języka odpowiedzi API. Możesz także ustawić dni prognozy oraz zakres czujników dziennych (TYPY).", "data": { "api_key": "Klucz API", "name": "Nazwa", "location": "Lokalizacja", "update_interval": "Interwał aktualizacji (godziny)", - "language_code": "Kod języka odpowiedzi API" + "language_code": "Kod języka odpowiedzi API", + "forecast_days": "Dni prognozy (1–5)", + "create_forecast_sensors": "Zakres czujników dziennych (TYPY)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Nieprawidłowy klucz API", - "cannot_connect": "Brak połączenia z usługą", - "quota_exceeded": "Przekroczono limit", - "invalid_language": "Nieprawidłowy kod języka", + "invalid_auth": "Nieprawidłowy klucz API\n\n{error_message}", + "cannot_connect": "Brak połączenia z usługą\n\n{error_message}", + "quota_exceeded": "Przekroczono limit\n\n{error_message}", "invalid_language_format": "Użyj kanonicznego kodu BCP-47, np. 
\"en\" lub \"es-ES\".", "empty": "To pole nie może być puste", "invalid_option_combo": "Zwiększ 'Dni prognozy', aby objąć wybrane czujniki dzienne.", "invalid_coordinates": "Wybierz prawidłową lokalizację na mapie.", - "unknown": "Nieznany błąd" + "unknown": "Nieznany błąd", + "invalid_update_interval": "Interwał aktualizacji musi wynosić od 1 do 24 godzin.", + "invalid_forecast_days": "Dni prognozy muszą mieścić się w zakresie 1–5." }, "abort": { "already_configured": "Ta lokalizacja jest już skonfigurowana.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opcje", - "description": "Zmień interwał aktualizacji, język odpowiedzi API, liczbę dni prognozy oraz czujniki dzienne dla TYPÓW dla {title}.\nOpcje czujników dziennych (TYPY): Tylko dziś (none), Do jutra (D+1), Do pojutrza (D+2).", + "description": "Zmień interwał aktualizacji, język odpowiedzi API, liczbę dni prognozy oraz czujniki dzienne dla TYPÓW dla {title}.\nOpcje czujników dziennych (TYPY): Tylko dziś (none), Do jutra (D+1), Do pojutrza (D+2; tworzy czujniki D+1 i D+2).", "data": { "update_interval": "Interwał aktualizacji (godziny)", "language_code": "Kod języka odpowiedzi API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Nieprawidłowy klucz API", - "cannot_connect": "Brak połączenia z usługą", - "quota_exceeded": "Przekroczono limit", - "invalid_language": "Nieprawidłowy kod języka", "invalid_language_format": "Użyj kanonicznego kodu BCP-47, np. \"en\" lub \"es-ES\".", "empty": "To pole nie może być puste", "invalid_option_combo": "Zwiększ 'Dni prognozy', aby objąć wybrane czujniki dzienne.", - "unknown": "Nieznany błąd" + "unknown": "Nieznany błąd", + "invalid_update_interval": "Interwał aktualizacji musi wynosić od 1 do 24 godzin.", + "invalid_forecast_days": "Dni prognozy muszą mieścić się w zakresie 1–5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/pt-BR.json b/custom_components/pollenlevels/translations/pt-BR.json index 68f60be2..31e98645 100644 --- a/custom_components/pollenlevels/translations/pt-BR.json +++ b/custom_components/pollenlevels/translations/pt-BR.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Configuração dos Níveis de Pólen", - "description": "Informe sua chave da API do Google, selecione sua localização no mapa, o intervalo de atualização (horas) e o código de idioma para a resposta da API.", + "description": "Insira sua chave de API do Google ([obtenha aqui]({api_key_url})) e consulte as melhores práticas ([melhores práticas]({restricting_api_keys_url})). Selecione sua localização no mapa, o intervalo de atualização (horas) e o código de idioma da resposta da API. Você também pode definir os dias de previsão e o escopo dos sensores por dia (TIPOS).", "data": { "api_key": "Chave da API", "name": "Nome", "location": "Localização", "update_interval": "Intervalo de atualização (horas)", - "language_code": "Código de idioma da resposta da API" + "language_code": "Código de idioma da resposta da API", + "forecast_days": "Dias de previsão (1–5)", + "create_forecast_sensors": "Escopo dos sensores por dia (TIPOS)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Chave de API inválida", - "cannot_connect": "Não foi possível conectar ao serviço", - "quota_exceeded": "Cota excedida", - "invalid_language": "Código de idioma inválido", + "invalid_auth": "Chave de API inválida\n\n{error_message}", + "cannot_connect": "Não foi possível conectar ao serviço\n\n{error_message}", + "quota_exceeded": "Cota excedida\n\n{error_message}", "invalid_language_format": "Use um código BCP-47 canônico, como \"en\" ou \"es-ES\".", "empty": "Este campo não pode ficar vazio", "invalid_option_combo": "Aumente \"Dias de previsão\" para cobrir os sensores por dia selecionados.", "invalid_coordinates": "Selecione um 
local válido no mapa.", - "unknown": "Erro desconhecido" + "unknown": "Erro desconhecido", + "invalid_update_interval": "O intervalo de atualização deve estar entre 1 e 24 horas.", + "invalid_forecast_days": "Os dias de previsão devem estar entre 1 e 5." }, "abort": { "already_configured": "Este local já está configurado.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opções", - "description": "Altere o intervalo de atualização, o idioma da API, os dias de previsão e os sensores por dia para {title}.\nOpções de sensores por dia (TIPOS): Apenas hoje (none), Até amanhã (D+1), Até depois de amanhã (D+2).", + "description": "Altere o intervalo de atualização, o idioma da API, os dias de previsão e os sensores por dia para {title}.\nOpções de sensores por dia (TIPOS): Apenas hoje (none), Até amanhã (D+1), Até depois de amanhã (D+2; cria sensores D+1 e D+2).", "data": { "update_interval": "Intervalo de atualização (horas)", "language_code": "Código de idioma da resposta da API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Chave de API inválida", - "cannot_connect": "Não foi possível conectar ao serviço", - "quota_exceeded": "Cota excedida", - "invalid_language": "Código de idioma inválido", "invalid_language_format": "Use um código BCP-47 canônico, como \"en\" ou \"es-ES\".", "empty": "Este campo não pode ficar vazio", "invalid_option_combo": "Aumente \"Dias de previsão\" para cobrir os sensores por dia selecionados.", - "unknown": "Erro desconhecido" + "unknown": "Erro desconhecido", + "invalid_update_interval": "O intervalo de atualização deve estar entre 1 e 24 horas.", + "invalid_forecast_days": "Os dias de previsão devem estar entre 1 e 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/pt-PT.json b/custom_components/pollenlevels/translations/pt-PT.json index a4cefb09..254a6d0d 100644 --- a/custom_components/pollenlevels/translations/pt-PT.json +++ b/custom_components/pollenlevels/translations/pt-PT.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Configuração dos Níveis de Pólen", - "description": "Introduza a sua chave da API do Google, selecione a sua localização no mapa, o intervalo de atualização (horas) e o código de idioma para a resposta da API.", + "description": "Introduza a sua chave de API do Google ([obtenha-a aqui]({api_key_url})) e reveja as melhores práticas ([melhores práticas]({restricting_api_keys_url})). Selecione a sua localização no mapa, o intervalo de atualização (horas) e o código de idioma da resposta da API. Também pode definir os dias de previsão e o âmbito dos sensores por dia (TIPOS).", "data": { "api_key": "Chave da API", "name": "Nome", "location": "Localização", "update_interval": "Intervalo de atualização (horas)", - "language_code": "Código de idioma da resposta da API" + "language_code": "Código de idioma da resposta da API", + "forecast_days": "Dias de previsão (1–5)", + "create_forecast_sensors": "Âmbito dos sensores por dia (TIPOS)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Chave da API inválida", - "cannot_connect": "Não é possível ligar ao serviço", - "quota_exceeded": "Quota excedida", - "invalid_language": "Código de idioma inválido", + "invalid_auth": "Chave da API inválida\n\n{error_message}", + "cannot_connect": "Não é possível ligar ao serviço\n\n{error_message}", + "quota_exceeded": "Quota excedida\n\n{error_message}", "invalid_language_format": "Use um código BCP-47 canónico, como \"en\" ou \"es-ES\".", "empty": "Este campo não pode estar vazio", "invalid_option_combo": "Aumente \"Dias de previsão\" para cobrir os sensores por dia selecionados.", "invalid_coordinates": "Selecione uma 
localização válida no mapa.", - "unknown": "Erro desconhecido" + "unknown": "Erro desconhecido", + "invalid_update_interval": "O intervalo de atualização deve estar entre 1 e 24 horas.", + "invalid_forecast_days": "Os dias de previsão devem estar entre 1 e 5." }, "abort": { "already_configured": "Esta localização já está configurada.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opções", - "description": "Altere o intervalo de atualização, o idioma da API, os dias de previsão e os sensores por dia para {title}.\nOpções de sensores por dia (TIPOS): Apenas hoje (none), Até amanhã (D+1), Até depois de amanhã (D+2).", + "description": "Altere o intervalo de atualização, o idioma da API, os dias de previsão e os sensores por dia para {title}.\nOpções de sensores por dia (TIPOS): Apenas hoje (none), Até amanhã (D+1), Até depois de amanhã (D+2; cria sensores D+1 e D+2).", "data": { "update_interval": "Intervalo de atualização (horas)", "language_code": "Código de idioma da resposta da API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Chave da API inválida", - "cannot_connect": "Não é possível ligar ao serviço", - "quota_exceeded": "Quota excedida", - "invalid_language": "Código de idioma inválido", "invalid_language_format": "Use um código BCP-47 canónico, como \"en\" ou \"es-ES\".", "empty": "Este campo não pode estar vazio", "invalid_option_combo": "Aumente \"Dias de previsão\" para cobrir os sensores por dia selecionados.", - "unknown": "Erro desconhecido" + "unknown": "Erro desconhecido", + "invalid_update_interval": "O intervalo de atualização deve estar entre 1 e 24 horas.", + "invalid_forecast_days": "Os dias de previsão devem estar entre 1 e 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/ro.json b/custom_components/pollenlevels/translations/ro.json index a32c125a..ad78a630 100644 --- a/custom_components/pollenlevels/translations/ro.json +++ b/custom_components/pollenlevels/translations/ro.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Configurare Niveluri de Polen", - "description": "Introduceți cheia Google API, selectați locația pe hartă, intervalul de actualizare (ore) și codul de limbă pentru răspunsul API.", + "description": "Introdu cheia ta API Google ([obține-o aici]({api_key_url})) și consultă cele mai bune practici ([cele mai bune practici]({restricting_api_keys_url})). Selectează locația pe hartă, intervalul de actualizare (ore) și codul de limbă al răspunsului API. Poți seta și zilele de prognoză și domeniul senzorilor pe zile (TIPURI).", "data": { "api_key": "Cheie API", "name": "Nume", "location": "Locație", "update_interval": "Interval de actualizare (ore)", - "language_code": "Codul limbii pentru răspunsul API" + "language_code": "Codul limbii pentru răspunsul API", + "forecast_days": "Zile de prognoză (1–5)", + "create_forecast_sensors": "Domeniul senzorilor pe zile (TIPURI)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Cheie API nevalidă", - "cannot_connect": "Nu se poate conecta la serviciu", - "quota_exceeded": "Cota depășită", - "invalid_language": "Cod de limbă nevalid", + "invalid_auth": "Cheie API nevalidă\n\n{error_message}", + "cannot_connect": "Nu se poate conecta la serviciu\n\n{error_message}", + "quota_exceeded": "Cota depășită\n\n{error_message}", "invalid_language_format": "Folosiți un cod BCP-47 canonic, de exemplu \"en\" sau \"es-ES\".", "empty": "Acest câmp nu poate fi gol", "invalid_option_combo": "Măriți \"Zilele de prognoză\" pentru a acoperi senzorii selectați pe zile.", "invalid_coordinates": "Selectează o locație validă pe hartă.", - "unknown": "Eroare necunoscută" + "unknown": "Eroare 
necunoscută", + "invalid_update_interval": "Intervalul de actualizare trebuie să fie între 1 și 24 de ore.", + "invalid_forecast_days": "Zilele de prognoză trebuie să fie între 1 și 5." }, "abort": { "already_configured": "Această locație este deja configurată.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Opțiuni", - "description": "Modificați intervalul de actualizare, limba API, zilele de prognoză și senzorii pe zile pentru {title}.\nOpțiuni pentru senzorii pe zile (TIPURI): Doar azi (none), Până mâine (D+1), Până poimâine (D+2).", + "description": "Modificați intervalul de actualizare, limba API, zilele de prognoză și senzorii pe zile pentru {title}.\nOpțiuni pentru senzorii pe zile (TIPURI): Doar azi (none), Până mâine (D+1), Până poimâine (D+2; creează atât senzori D+1, cât și D+2).", "data": { "update_interval": "Interval de actualizare (ore)", "language_code": "Codul limbii pentru răspunsul API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Cheie API nevalidă", - "cannot_connect": "Nu se poate conecta la serviciu", - "quota_exceeded": "Cota depășită", - "invalid_language": "Cod de limbă nevalid", "invalid_language_format": "Folosiți un cod BCP-47 canonic, de exemplu \"en\" sau \"es-ES\".", "empty": "Acest câmp nu poate fi gol", "invalid_option_combo": "Măriți \"Zilele de prognoză\" pentru a acoperi senzorii selectați pe zile.", - "unknown": "Eroare necunoscută" + "unknown": "Eroare necunoscută", + "invalid_update_interval": "Intervalul de actualizare trebuie să fie între 1 și 24 de ore.", + "invalid_forecast_days": "Zilele de prognoză trebuie să fie între 1 și 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/ru.json b/custom_components/pollenlevels/translations/ru.json index 1a42b05b..3f4332e1 100644 --- a/custom_components/pollenlevels/translations/ru.json +++ b/custom_components/pollenlevels/translations/ru.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Настройка уровней пыльцы", - "description": "Введите ваш ключ Google API, выберите свое местоположение на карте, интервал обновления (в часах) и код языка ответа API.", + "description": "Введите ключ Google API ([получите его здесь]({api_key_url})) и изучите рекомендации ([лучшие практики]({restricting_api_keys_url})). Выберите местоположение на карте, интервал обновления (часы) и языковой код ответа API. Вы также можете настроить дни прогноза и диапазон дневных датчиков (ТИПЫ).", "data": { "api_key": "Ключ API", "name": "Имя", "location": "Местоположение", "update_interval": "Интервал обновления (в часах)", - "language_code": "Код языка ответа API" + "language_code": "Код языка ответа API", + "forecast_days": "Дни прогноза (1–5)", + "create_forecast_sensors": "Диапазон дневных датчиков (ТИПЫ)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Неверный ключ API", - "cannot_connect": "Не удаётся подключиться к сервису", - "quota_exceeded": "Превышен лимит запросов", - "invalid_language": "Неверный код языка", + "invalid_auth": "Неверный ключ API\n\n{error_message}", + "cannot_connect": "Не удаётся подключиться к сервису\n\n{error_message}", + "quota_exceeded": "Превышен лимит запросов\n\n{error_message}", "invalid_language_format": "Используйте канонический код BCP-47, например \"en\" или \"es-ES\".", "empty": "Это поле не может быть пустым", "invalid_option_combo": "Увеличьте «Дни прогноза», чтобы охватить выбранные датчики по дням.", "invalid_coordinates": "Выберите корректное местоположение на карте.", - "unknown": "Неизвестная ошибка" + "unknown": "Неизвестная ошибка", + "invalid_update_interval": "Интервал 
обновления должен быть от 1 до 24 часов.", + "invalid_forecast_days": "Дни прогноза должны быть от 1 до 5." }, "abort": { "already_configured": "Это местоположение уже настроено.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Параметры", - "description": "Измените интервал обновления, язык ответа API, дни прогноза и дневные датчики для ТИПОВ для {title}.\nВарианты дневных датчиков (ТИПЫ): Только сегодня (none), До завтра (D+1), До послезавтра (D+2).", + "description": "Измените интервал обновления, язык ответа API, дни прогноза и дневные датчики для ТИПОВ для {title}.\nВарианты дневных датчиков (ТИПЫ): Только сегодня (none), До завтра (D+1), До послезавтра (D+2; создаёт датчики D+1 и D+2).", "data": { "update_interval": "Интервал обновления (в часах)", "language_code": "Код языка ответа API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Неверный ключ API", - "cannot_connect": "Не удаётся подключиться к сервису", - "quota_exceeded": "Превышен лимит запросов", - "invalid_language": "Неверный код языка", "invalid_language_format": "Используйте канонический код BCP-47, например \"en\" или \"es-ES\".", "empty": "Это поле не может быть пустым", "invalid_option_combo": "Увеличьте «Дни прогноза», чтобы охватить выбранные датчики по дням.", - "unknown": "Неизвестная ошибка" + "unknown": "Неизвестная ошибка", + "invalid_update_interval": "Интервал обновления должен быть от 1 до 24 часов.", + "invalid_forecast_days": "Дни прогноза должны быть от 1 до 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/sv.json b/custom_components/pollenlevels/translations/sv.json index 57918c84..5d1868fd 100644 --- a/custom_components/pollenlevels/translations/sv.json +++ b/custom_components/pollenlevels/translations/sv.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Konfiguration av pollennivåer", - "description": "Ange din Google API-nyckel, välj din plats på kartan, uppdateringsintervall (timmar) och språkkod för API-svar.", + "description": "Ange din Google API-nyckel ([hämta den här]({api_key_url})) och läs bästa praxis ([bästa praxis]({restricting_api_keys_url})). Välj din plats på kartan, uppdateringsintervallet (timmar) och språkkoden för API-svaret. Du kan också ange prognosdagar och omfånget för sensorer per dag (TYPER).", "data": { "api_key": "API-nyckel", "name": "Namn", "location": "Plats", "update_interval": "Uppdateringsintervall (timmar)", - "language_code": "Språkkod för API-svar" + "language_code": "Språkkod för API-svar", + "forecast_days": "Prognosdagar (1–5)", + "create_forecast_sensors": "Omfång för sensorer per dag (TYPER)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Ogiltig API-nyckel", - "cannot_connect": "Kan inte ansluta till tjänsten", - "quota_exceeded": "Kvoten har överskridits", - "invalid_language": "Ogiltig språkkod", + "invalid_auth": "Ogiltig API-nyckel\n\n{error_message}", + "cannot_connect": "Kan inte ansluta till tjänsten\n\n{error_message}", + "quota_exceeded": "Kvoten har överskridits\n\n{error_message}", "invalid_language_format": "Använd en kanonisk BCP-47-kod som \"en\" eller \"es-ES\".", "empty": "Detta fält får inte vara tomt", "invalid_option_combo": "Öka \"Prognosdagar\" för att täcka valda sensorer per dag.", "invalid_coordinates": "Välj en giltig plats på kartan.", - "unknown": "Okänt fel" + "unknown": "Okänt fel", + "invalid_update_interval": "Uppdateringsintervallet måste vara mellan 1 och 24 timmar.", + 
"invalid_forecast_days": "Prognosdagar måste vara mellan 1 och 5." }, "abort": { "already_configured": "Den här platsen är redan konfigurerad.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Alternativ", - "description": "Ändra uppdateringsintervall, API-språk, prognosdagar och sensorer per dag för {title}.\nAlternativ för sensorer per dag (TYPER): Endast idag (none), Till och med i morgon (D+1), Till och med i övermorgon (D+2).", + "description": "Ändra uppdateringsintervall, API-språk, prognosdagar och sensorer per dag för {title}.\nAlternativ för sensorer per dag (TYPER): Endast idag (none), Till och med i morgon (D+1), Till och med i övermorgon (D+2; skapar både D+1- och D+2-sensorer).", "data": { "update_interval": "Uppdateringsintervall (timmar)", "language_code": "Språkkod för API-svar", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Ogiltig API-nyckel", - "cannot_connect": "Kan inte ansluta till tjänsten", - "quota_exceeded": "Kvoten har överskridits", - "invalid_language": "Ogiltig språkkod", "invalid_language_format": "Använd en kanonisk BCP-47-kod som \"en\" eller \"es-ES\".", "empty": "Detta fält får inte vara tomt", "invalid_option_combo": "Öka \"Prognosdagar\" för att täcka valda sensorer per dag.", - "unknown": "Okänt fel" + "unknown": "Okänt fel", + "invalid_update_interval": "Uppdateringsintervallet måste vara mellan 1 och 24 timmar.", + "invalid_forecast_days": "Prognosdagar måste vara mellan 1 och 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/uk.json b/custom_components/pollenlevels/translations/uk.json index 388f9bfa..0d90cba4 100644 --- a/custom_components/pollenlevels/translations/uk.json +++ b/custom_components/pollenlevels/translations/uk.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "Налаштування рівнів пилку", - "description": "Введіть свій ключ Google API, виберіть місцезнаходження на карті, інтервал оновлення (у годинах) та код мови для відповіді API.", + "description": "Введіть свій ключ Google API ([отримайте його тут]({api_key_url})) та ознайомтеся з найкращими практиками ([найкращі практики]({restricting_api_keys_url})). Виберіть місце на карті, інтервал оновлення (години) і код мови відповіді API. Ви також можете налаштувати дні прогнозу та діапазон денних датчиків (ТИПИ).", "data": { "api_key": "Ключ API", "name": "Ім'я", "location": "Місцезнаходження", "update_interval": "Інтервал оновлення (у годинах)", - "language_code": "Код мови відповіді API" + "language_code": "Код мови відповіді API", + "forecast_days": "Дні прогнозу (1–5)", + "create_forecast_sensors": "Діапазон денних датчиків (ТИПИ)" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "Невірний ключ API", - "cannot_connect": "Не вдається підключитися до сервісу", - "quota_exceeded": "Перевищено ліміт запитів", - "invalid_language": "Невірний код мови", + "invalid_auth": "Невірний ключ API\n\n{error_message}", + "cannot_connect": "Не вдається підключитися до сервісу\n\n{error_message}", + "quota_exceeded": "Перевищено ліміт запитів\n\n{error_message}", "invalid_language_format": "Використовуйте канонічний код BCP-47, наприклад \"en\" або \"es-ES\".", "empty": "Це поле не може бути порожнім", "invalid_option_combo": "Збільшіть «Дні прогнозу», щоб охопити вибрані денні датчики.", "invalid_coordinates": "Виберіть дійсне місце на карті.", - "unknown": "Невідома помилка" + "unknown": "Невідома помилка", + 
"invalid_update_interval": "Інтервал оновлення має бути між 1 і 24 годинами.", + "invalid_forecast_days": "Дні прогнозу мають бути від 1 до 5." }, "abort": { "already_configured": "Це розташування вже налаштовано.", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – Параметри", - "description": "Змініть інтервал оновлення, мову відповіді API, кількість днів прогнозу та денні датчики для ТИПІВ для {title}.\nПараметри денних датчиків (ТИПИ): Лише сьогодні (none), До завтра (D+1), До післязавтра (D+2).", + "description": "Змініть інтервал оновлення, мову відповіді API, кількість днів прогнозу та денні датчики для ТИПІВ для {title}.\nПараметри денних датчиків (ТИПИ): Лише сьогодні (none), До завтра (D+1), До післязавтра (D+2; створює датчики D+1 і D+2).", "data": { "update_interval": "Інтервал оновлення (у годинах)", "language_code": "Код мови відповіді API", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "Невірний ключ API", - "cannot_connect": "Не вдається підключитися до сервісу", - "quota_exceeded": "Перевищено ліміт запитів", - "invalid_language": "Невірний код мови", "invalid_language_format": "Використовуйте канонічний код BCP-47, наприклад \"en\" або \"es-ES\".", "empty": "Це поле не може бути порожнім", "invalid_option_combo": "Збільшіть «Дні прогнозу», щоб охопити вибрані денні датчики.", - "unknown": "Невідома помилка" + "unknown": "Невідома помилка", + "invalid_update_interval": "Інтервал оновлення має бути між 1 і 24 годинами.", + "invalid_forecast_days": "Дні прогнозу мають бути від 1 до 5." 
} }, "device": { diff --git a/custom_components/pollenlevels/translations/zh-Hans.json b/custom_components/pollenlevels/translations/zh-Hans.json index 1f200637..e639bff4 100644 --- a/custom_components/pollenlevels/translations/zh-Hans.json +++ b/custom_components/pollenlevels/translations/zh-Hans.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "花粉水平配置", - "description": "请输入 Google API 密钥、在地图上选择位置、更新间隔(小时)以及 API 响应的语言代码。", + "description": "输入你的 Google API 密钥([在此获取]({api_key_url}))并查看最佳实践([最佳实践]({restricting_api_keys_url}))。在地图上选择位置、更新间隔(小时)以及 API 响应的语言代码。你还可以设置预测天数以及逐日类型传感器的范围。", "data": { "api_key": "API 密钥", "name": "名称", "location": "位置", "update_interval": "更新间隔(小时)", - "language_code": "API 响应语言代码" + "language_code": "API 响应语言代码", + "forecast_days": "预测天数(1–5)", + "create_forecast_sensors": "逐日类型传感器范围" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "无效的 API 密钥", - "cannot_connect": "无法连接到服务", - "quota_exceeded": "配额已用尽", - "invalid_language": "无效的语言代码", + "invalid_auth": "无效的 API 密钥\n\n{error_message}", + "cannot_connect": "无法连接到服务\n\n{error_message}", + "quota_exceeded": "配额已用尽\n\n{error_message}", "invalid_language_format": "请使用规范的 BCP-47 代码,例如 \"en\" 或 \"es-ES\"。", "empty": "此字段不能为空", "invalid_option_combo": "请增加“预测天数”,以覆盖所选的逐日类型传感器。", "invalid_coordinates": "请在地图上选择有效的位置。", - "unknown": "未知错误" + "unknown": "未知错误", + "invalid_update_interval": "更新间隔必须在 1 到 24 小时之间。", + "invalid_forecast_days": "预测天数必须在 1 到 5 之间。" }, "abort": { "already_configured": "该位置已配置。", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – 选项", - "description": "修改更新间隔、API 语言、预测天以及逐日类型传感器,适用于 {title}。\n逐日类型传感器选项:仅今日(none)、至明日(D+1)、至后日(D+2)。", + "description": "修改更新间隔、API 语言、预测天以及逐日类型传感器,适用于 {title}。\n逐日类型传感器选项:仅今日(none)、至明日(D+1)、至后日(D+2;会同时创建 D+1 和 D+2 传感器)。", "data": { "update_interval": "更新间隔(小时)", "language_code": "API 响应语言代码", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "无效的 API 密钥", - "cannot_connect": "无法连接到服务", - 
"quota_exceeded": "配额已用尽", - "invalid_language": "无效的语言代码", "invalid_language_format": "请使用规范的 BCP-47 代码,例如 \"en\" 或 \"es-ES\"。", "empty": "此字段不能为空", "invalid_option_combo": "请增加“预测天数”,以覆盖所选的逐日类型传感器。", - "unknown": "未知错误" + "unknown": "未知错误", + "invalid_update_interval": "更新间隔必须在 1 到 24 小时之间。", + "invalid_forecast_days": "预测天数必须在 1 到 5 之间。" } }, "device": { diff --git a/custom_components/pollenlevels/translations/zh-Hant.json b/custom_components/pollenlevels/translations/zh-Hant.json index 0c4c4e3d..10f54599 100644 --- a/custom_components/pollenlevels/translations/zh-Hant.json +++ b/custom_components/pollenlevels/translations/zh-Hant.json @@ -3,13 +3,15 @@ "step": { "user": { "title": "花粉水平設定", - "description": "請輸入 Google API 金鑰、在地圖上選擇位置、更新間隔(小時)以及 API 回應的語言代碼。", + "description": "輸入你的 Google API 金鑰([在此取得]({api_key_url}))並查看最佳實務([最佳實務]({restricting_api_keys_url}))。在地圖上選擇位置、更新間隔(小時)以及 API 回應的語言代碼。你也可以設定預測天數與逐日類型感測器的範圍。", "data": { "api_key": "API 金鑰", "name": "名稱", "location": "位置", "update_interval": "更新間隔(小時)", - "language_code": "API 回應語言代碼" + "language_code": "API 回應語言代碼", + "forecast_days": "預測天數(1–5)", + "create_forecast_sensors": "逐日類型感測器範圍" } }, "reauth_confirm": { @@ -21,15 +23,16 @@ } }, "error": { - "invalid_auth": "無效的 API 金鑰", - "cannot_connect": "無法連線到服務", - "quota_exceeded": "超出配額", - "invalid_language": "無效的語言代碼", + "invalid_auth": "無效的 API 金鑰\n\n{error_message}", + "cannot_connect": "無法連線到服務\n\n{error_message}", + "quota_exceeded": "超出配額\n\n{error_message}", "invalid_language_format": "請使用標準的 BCP-47 代碼,例如 \"en\" 或 \"es-ES\"。", "empty": "此欄位不得為空", "invalid_option_combo": "請增加「預測天數」以涵蓋所選的逐日類型感測器。", "invalid_coordinates": "請在地圖上選擇有效的位置。", - "unknown": "未知錯誤" + "unknown": "未知錯誤", + "invalid_update_interval": "更新間隔必須在 1 到 24 小時之間。", + "invalid_forecast_days": "預測天數必須在 1 到 5 之間。" }, "abort": { "already_configured": "此位置已設定。", @@ -41,7 +44,7 @@ "step": { "init": { "title": "Pollen Levels – 選項", - "description": "修改更新間隔、API 語言、預測天數與逐日類型感測器,適用於 
{title}。\n逐日類型感測器選項:僅今日(none)、至明日(D+1)、至後日(D+2)。", + "description": "修改更新間隔、API 語言、預測天數與逐日類型感測器,適用於 {title}。\n逐日類型感測器選項:僅今日(none)、至明日(D+1)、至後日(D+2;會同時建立 D+1 與 D+2 感測器)。", "data": { "update_interval": "更新間隔(小時)", "language_code": "API 回應語言代碼", @@ -51,14 +54,12 @@ } }, "error": { - "invalid_auth": "無效的 API 金鑰", - "cannot_connect": "無法連線到服務", - "quota_exceeded": "超出配額", - "invalid_language": "無效的語言代碼", "invalid_language_format": "請使用標準的 BCP-47 代碼,例如 \"en\" 或 \"es-ES\"。", "empty": "此欄位不得為空", "invalid_option_combo": "請增加「預測天數」以涵蓋所選的逐日類型感測器。", - "unknown": "未知錯誤" + "unknown": "未知錯誤", + "invalid_update_interval": "更新間隔必須在 1 到 24 小時之間。", + "invalid_forecast_days": "預測天數必須在 1 到 5 之間。" } }, "device": { diff --git a/custom_components/pollenlevels/util.py b/custom_components/pollenlevels/util.py index 0529ef28..d99218b3 100644 --- a/custom_components/pollenlevels/util.py +++ b/custom_components/pollenlevels/util.py @@ -2,6 +2,53 @@ from __future__ import annotations +import logging +import math +from typing import TYPE_CHECKING, Any + +from .const import FORECAST_SENSORS_CHOICES + +if TYPE_CHECKING: # pragma: no cover - typing-only import + from aiohttp import ClientResponse +else: # pragma: no cover - runtime fallback for test environments without aiohttp + ClientResponse = Any + + +async def extract_error_message(resp: ClientResponse, default: str = "") -> str: + """Extract and normalize an HTTP error message without secrets.""" + + message: str | None = None + try: + try: + json_obj = await resp.json(content_type=None) + except TypeError: + json_obj = await resp.json() + if isinstance(json_obj, dict): + error = json_obj.get("error") + if isinstance(error, dict): + raw_msg = error.get("message") + if isinstance(raw_msg, str): + message = raw_msg + except Exception: # noqa: BLE001 + message = None + + if not message: + try: + text = await resp.text() + if isinstance(text, str): + message = text + except Exception: # noqa: BLE001 + message = None + + normalized = " ".join( + 
(message or "").replace("\r", " ").replace("\n", " ").split() + ).strip() + + if len(normalized) > 300: + normalized = normalized[:300] + + return normalized or default + def redact_api_key(text: object, api_key: str | None) -> str: """Return a string representation of *text* with the API key redacted.""" @@ -22,7 +69,53 @@ def redact_api_key(text: object, api_key: str | None) -> str: return s +def normalize_sensor_mode(mode: Any, logger: logging.Logger) -> str: + """Normalize sensor mode, defaulting and logging a warning if invalid.""" + raw_mode = getattr(mode, "value", mode) + mode_str = None if raw_mode is None else str(raw_mode).strip() + if not mode_str: + mode_str = None + if mode_str in FORECAST_SENSORS_CHOICES: + return mode_str + + if "none" in FORECAST_SENSORS_CHOICES: + default_mode = "none" + else: + default_mode = ( + FORECAST_SENSORS_CHOICES[0] if FORECAST_SENSORS_CHOICES else "none" + ) + if mode_str is not None: + logger.warning( + "Invalid stored per-day sensor mode '%s'; defaulting to '%s'", + mode_str, + default_mode, + ) + return default_mode + + +def safe_parse_int(value: Any) -> int | None: + """Parse an integer-like value, rejecting non-finite and decimal numbers.""" + if value is None or isinstance(value, bool): + return None + + try: + parsed_float = float(value) + except (TypeError, ValueError, OverflowError): + return None + + if not math.isfinite(parsed_float) or not parsed_float.is_integer(): + return None + + return int(parsed_float) + + # Backwards-compatible alias for modules that still import the private helper name. 
_redact_api_key = redact_api_key -__all__ = ["redact_api_key", "_redact_api_key"] +__all__ = [ + "extract_error_message", + "normalize_sensor_mode", + "redact_api_key", + "safe_parse_int", + "_redact_api_key", +] diff --git a/pyproject.toml b/pyproject.toml index fc1f494a..c29fb16d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ [project] name = "pollenlevels" -version = "1.8.6" +version = "1.9.3" # Enforce the runtime floor aligned with upcoming HA Python 3.14 images. requires-python = ">=3.14" @@ -35,6 +35,12 @@ ignore = [ known-first-party = ["custom_components.pollenlevels"] combine-as-imports = true +[tool.pytest.ini_options] +# Use importlib mode to avoid test module name collisions in environments +# where unrelated packages (or plugins) ship a top-level "tests" package. +addopts = ["--import-mode=importlib"] +testpaths = ["tests"] + # --- Optional hardening (uncomment if needed) --- # [tool.ruff] # force-exclude = true diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..ae4307a2 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,6 @@ +"""Test package marker. + +This file ensures that imports like `import tests.test_config_flow` resolve to the +repository's local `tests` package instead of an unrelated third-party package +named `tests` that may be present in site-packages. 
+""" diff --git a/tests/conftest.py b/tests/conftest.py index 0ceccb1c..6611eb5d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,6 +3,7 @@ from __future__ import annotations import asyncio +import inspect import pytest @@ -20,7 +21,7 @@ def pytest_pyfunc_call(pyfuncitem: pytest.Function) -> bool | None: """Run @pytest.mark.asyncio tests locally when no other async plugin is active.""" marker = pyfuncitem.get_closest_marker("asyncio") - if marker is None or not asyncio.iscoroutinefunction(pyfuncitem.obj): + if marker is None or not inspect.iscoroutinefunction(pyfuncitem.obj): return None # If another asyncio-aware plugin is active, let it handle the test. diff --git a/tests/test_config_flow.py b/tests/test_config_flow.py index fd2b89dd..af1f78dc 100644 --- a/tests/test_config_flow.py +++ b/tests/test_config_flow.py @@ -15,19 +15,34 @@ ROOT = Path(__file__).resolve().parents[1] sys.path.insert(0, str(ROOT)) + +def _force_module(name: str, module: ModuleType) -> None: + """Force a module into sys.modules. + + Tests in this repository are designed to run without Home Assistant installed. + In some developer environments, other pytest plugins or pre-imports may have + already inserted modules like `custom_components` or `homeassistant`. + + Using `setdefault()` can then silently keep the pre-existing module, which + may not match the lightweight stubs expected by these tests. + """ + + sys.modules[name] = module + + # --------------------------------------------------------------------------- # Minimal package and dependency stubs so the config flow can be imported. 
# --------------------------------------------------------------------------- custom_components_pkg = ModuleType("custom_components") custom_components_pkg.__path__ = [str(ROOT / "custom_components")] -sys.modules.setdefault("custom_components", custom_components_pkg) +_force_module("custom_components", custom_components_pkg) pollenlevels_pkg = ModuleType("custom_components.pollenlevels") pollenlevels_pkg.__path__ = [str(ROOT / "custom_components" / "pollenlevels")] -sys.modules.setdefault("custom_components.pollenlevels", pollenlevels_pkg) +_force_module("custom_components.pollenlevels", pollenlevels_pkg) ha_mod = ModuleType("homeassistant") -sys.modules.setdefault("homeassistant", ha_mod) +_force_module("homeassistant", ha_mod) config_entries_mod = ModuleType("homeassistant.config_entries") @@ -51,6 +66,9 @@ def async_show_form(self, *args, **kwargs): # pragma: no cover - not used def async_create_entry(self, *args, **kwargs): # pragma: no cover - not used return {"title": kwargs.get("title"), "data": kwargs.get("data")} + def add_suggested_values_to_schema(self, schema, suggested_values): + return schema + class _StubOptionsFlow: pass @@ -68,17 +86,17 @@ def __init__(self, data=None, options=None, entry_id="stub-entry"): config_entries_mod.ConfigFlow = _StubConfigFlow config_entries_mod.OptionsFlow = _StubOptionsFlow config_entries_mod.ConfigEntry = _StubConfigEntry -sys.modules.setdefault("homeassistant.config_entries", config_entries_mod) +_force_module("homeassistant.config_entries", config_entries_mod) const_mod = ModuleType("homeassistant.const") const_mod.CONF_LATITUDE = "latitude" const_mod.CONF_LOCATION = "location" const_mod.CONF_LONGITUDE = "longitude" const_mod.CONF_NAME = "name" -sys.modules.setdefault("homeassistant.const", const_mod) +_force_module("homeassistant.const", const_mod) helpers_mod = ModuleType("homeassistant.helpers") -sys.modules.setdefault("homeassistant.helpers", helpers_mod) +_force_module("homeassistant.helpers", helpers_mod) 
config_validation_mod = ModuleType("homeassistant.helpers.config_validation") @@ -110,7 +128,7 @@ def _longitude(value=None): config_validation_mod.latitude = _latitude config_validation_mod.longitude = _longitude config_validation_mod.string = lambda value=None: value -sys.modules.setdefault("homeassistant.helpers.config_validation", config_validation_mod) +_force_module("homeassistant.helpers.config_validation", config_validation_mod) aiohttp_client_mod = ModuleType("homeassistant.helpers.aiohttp_client") @@ -152,7 +170,7 @@ def get(self, *args, **kwargs) -> _StubResponse: aiohttp_client_mod.async_get_clientsession = lambda hass: _StubSession() -sys.modules.setdefault("homeassistant.helpers.aiohttp_client", aiohttp_client_mod) +_force_module("homeassistant.helpers.aiohttp_client", aiohttp_client_mod) selector_mod = ModuleType("homeassistant.helpers.selector") @@ -167,9 +185,75 @@ def __init__(self, config: _LocationSelectorConfig): self.config = config +class _NumberSelectorConfig: + def __init__( + self, + *, + min: float | None = None, + max: float | None = None, + step: float | None = None, + mode: str | None = None, + unit_of_measurement: str | None = None, + ) -> None: + self.min = min + self.max = max + self.step = step + self.mode = mode + self.unit_of_measurement = unit_of_measurement + + +class _NumberSelectorMode: + BOX = "BOX" + + +class _NumberSelector: + def __init__(self, config: _NumberSelectorConfig): + self.config = config + + +class _TextSelectorConfig: + def __init__(self, *, type: str | None = None): # noqa: A003 + self.type = type + + +class _TextSelectorType: + TEXT = "TEXT" + PASSWORD = "PASSWORD" + + +class _TextSelector: + def __init__(self, config: _TextSelectorConfig): + self.config = config + + +class _SelectSelectorConfig: + def __init__(self, *, mode: str | None = None, options=None): + self.mode = mode + self.options = options + + +class _SelectSelectorMode: + DROPDOWN = "DROPDOWN" + + +class _SelectSelector: + def __init__(self, 
config: _SelectSelectorConfig): + self.config = config + + selector_mod.LocationSelector = _LocationSelector selector_mod.LocationSelectorConfig = _LocationSelectorConfig -sys.modules.setdefault("homeassistant.helpers.selector", selector_mod) +selector_mod.NumberSelector = _NumberSelector +selector_mod.NumberSelectorConfig = _NumberSelectorConfig +selector_mod.NumberSelectorMode = _NumberSelectorMode +selector_mod.TextSelector = _TextSelector +selector_mod.TextSelectorConfig = _TextSelectorConfig +selector_mod.TextSelectorType = _TextSelectorType +selector_mod.SelectSelector = _SelectSelector +selector_mod.SelectSelectorConfig = _SelectSelectorConfig +selector_mod.SelectSelectorMode = _SelectSelectorMode +selector_mod.section = lambda key: key +_force_module("homeassistant.helpers.selector", selector_mod) ha_mod.helpers = helpers_mod ha_mod.config_entries = config_entries_mod @@ -188,7 +272,7 @@ def __init__(self, *, total: float | int): aiohttp_mod.ClientError = _StubClientError aiohttp_mod.ClientTimeout = _StubClientTimeout -sys.modules.setdefault("aiohttp", aiohttp_mod) +_force_module("aiohttp", aiohttp_mod) vol_mod = ModuleType("voluptuous") @@ -199,15 +283,20 @@ def __init__(self, error_message=""): self.error_message = error_message +class _StubSchema: + def __init__(self, schema): + self.schema = schema + + vol_mod.Invalid = _StubInvalid -vol_mod.Schema = lambda *args, **kwargs: None -vol_mod.Optional = lambda *args, **kwargs: None -vol_mod.Required = lambda *args, **kwargs: None +vol_mod.Schema = lambda schema, **kwargs: _StubSchema(schema) +vol_mod.Optional = lambda key, **kwargs: key +vol_mod.Required = lambda key, **kwargs: key vol_mod.All = lambda *args, **kwargs: None vol_mod.Coerce = lambda *args, **kwargs: None vol_mod.Range = lambda *args, **kwargs: None vol_mod.In = lambda *args, **kwargs: None -sys.modules.setdefault("voluptuous", vol_mod) +_force_module("voluptuous", vol_mod) from homeassistant.const import ( CONF_LATITUDE, @@ -223,9 +312,17 @@ 
def __init__(self, error_message=""): ) from custom_components.pollenlevels.const import ( CONF_API_KEY, + CONF_CREATE_FORECAST_SENSORS, + CONF_FORECAST_DAYS, CONF_LANGUAGE_CODE, CONF_UPDATE_INTERVAL, DEFAULT_ENTRY_TITLE, + DEFAULT_FORECAST_DAYS, + DEFAULT_UPDATE_INTERVAL, + FORECAST_SENSORS_CHOICES, + MAX_FORECAST_DAYS, + MAX_UPDATE_INTERVAL_HOURS, + MIN_FORECAST_DAYS, ) @@ -243,6 +340,14 @@ async def __aexit__(self, exc_type, exc, tb): # pragma: no cover - trivial async def read(self) -> bytes: return self._body + async def json(self): + import json as _json + + return _json.loads(self._body.decode()) + + async def text(self) -> str: + return self._body.decode() + class _SequenceSession: def __init__(self, responses: list[_StubResponse]) -> None: @@ -335,6 +440,36 @@ def test_validate_input_invalid_language_key_mapping() -> None: assert normalized is None +def test_validate_input_empty_api_key(monkeypatch: pytest.MonkeyPatch) -> None: + """Blank or whitespace API keys should be rejected without HTTP calls.""" + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + + session_called = False + + def _raise_session(hass): + nonlocal session_called + session_called = True + raise AssertionError("async_get_clientsession should not be called") + + monkeypatch.setattr(cf, "async_get_clientsession", _raise_session) + + errors, normalized = asyncio.run( + flow._async_validate_input( + { + CONF_API_KEY: " ", + CONF_LOCATION: {CONF_LATITUDE: 1.0, CONF_LONGITUDE: 2.0}, + }, + check_unique_id=False, + ) + ) + + assert errors == {CONF_API_KEY: "empty"} + assert normalized is None + assert session_called is False + + def test_language_error_to_form_key_mapping() -> None: """voluptuous error messages map to localized form keys.""" @@ -433,12 +568,234 @@ def _base_user_input() -> dict: } -def test_validate_input_http_403_sets_invalid_auth( +@pytest.mark.parametrize( + ("raw_value", "expected"), + [ + ("not-a-number", DEFAULT_UPDATE_INTERVAL), + (0, 1), + (999, 
MAX_UPDATE_INTERVAL_HOURS), + ], +) +def test_setup_schema_update_interval_default_is_sanitized( + monkeypatch: pytest.MonkeyPatch, + raw_value: object, + expected: int, +) -> None: + """Update interval defaults should be sanitized for form rendering.""" + + captured_defaults: list[int | None] = [] + + def _capture_optional(key, **kwargs): + if key == CONF_UPDATE_INTERVAL: + captured_defaults.append(kwargs.get("default")) + return key + + monkeypatch.setattr(cf.vol, "Optional", _capture_optional) + + hass = SimpleNamespace( + config=SimpleNamespace(latitude=1.0, longitude=2.0, language="en") + ) + cf._build_step_user_schema(hass, {CONF_UPDATE_INTERVAL: raw_value}) + + assert captured_defaults == [expected] + + +@pytest.mark.parametrize( + ("raw_value", "expected"), + [ + ("999", str(MAX_FORECAST_DAYS)), + (-5, str(MIN_FORECAST_DAYS)), + ("abc", str(DEFAULT_FORECAST_DAYS)), + ], +) +def test_setup_schema_forecast_days_default_is_sanitized( + monkeypatch: pytest.MonkeyPatch, + raw_value: object, + expected: str, +) -> None: + """Forecast days defaults should be sanitized for form rendering.""" + + captured_defaults: list[str | None] = [] + + def _capture_optional(key, **kwargs): + if key == CONF_FORECAST_DAYS: + captured_defaults.append(kwargs.get("default")) + return key + + monkeypatch.setattr(cf.vol, "Optional", _capture_optional) + + hass = SimpleNamespace( + config=SimpleNamespace(latitude=1.0, longitude=2.0, language="en") + ) + cf._build_step_user_schema(hass, {CONF_FORECAST_DAYS: raw_value}) + + assert captured_defaults == [expected] + + +def test_setup_schema_sensor_mode_default_is_sanitized( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Per-day sensor defaults should fall back to a valid selector choice.""" + + captured_defaults: list[str | None] = [] + + def _capture_optional(key, **kwargs): + if key == CONF_CREATE_FORECAST_SENSORS: + captured_defaults.append(kwargs.get("default")) + return key + + monkeypatch.setattr(cf.vol, "Optional", 
_capture_optional) + + hass = SimpleNamespace( + config=SimpleNamespace(latitude=1.0, longitude=2.0, language="en") + ) + cf._build_step_user_schema(hass, {CONF_CREATE_FORECAST_SENSORS: "bad"}) + + assert captured_defaults == [FORECAST_SENSORS_CHOICES[0]] + + +def test_step_user_schema_masks_api_key_field() -> None: + """Initial setup form should render API key as a password selector.""" + + hass = SimpleNamespace( + config=SimpleNamespace(latitude=1.0, longitude=2.0, language="en") + ) + + schema = cf._build_step_user_schema(hass, {}) + api_selector = schema.schema[CONF_API_KEY] + + assert isinstance(api_selector, cf.TextSelector) + assert api_selector.config.type == cf.TextSelectorType.PASSWORD + + +def test_reauth_confirm_schema_masks_api_key_and_uses_blank_default( monkeypatch: pytest.MonkeyPatch, ) -> None: - """HTTP 403 during validation should map to invalid_auth.""" + """Reauth form should mask API key input and avoid prefilling secrets.""" + + captured_default: dict[str, object] = {} + orig_required = cf.vol.Required - session = _patch_client_session(monkeypatch, _StubResponse(403)) + def _capture_required(key, **kwargs): + if key == CONF_API_KEY: + captured_default["api_key"] = kwargs.get("default") + return orig_required(key, **kwargs) + + monkeypatch.setattr(cf.vol, "Required", _capture_required) + + entry = cf.config_entries.ConfigEntry( + data={ + CONF_API_KEY: "old-key", + CONF_LATITUDE: 1.0, + CONF_LONGITUDE: 2.0, + }, + entry_id="entry-id", + ) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace(config_entries=SimpleNamespace()) + flow.context = {"entry_id": "entry-id"} + flow._reauth_entry = entry + + captured: dict[str, object] = {} + + def _capture_show_form(*, step_id=None, data_schema=None, **kwargs): + captured["step_id"] = step_id + captured["schema"] = data_schema + return {"step_id": step_id} + + flow.async_show_form = _capture_show_form # type: ignore[method-assign] + + result = asyncio.run(flow.async_step_reauth_confirm()) 
+ + assert result == {"step_id": "reauth_confirm"} + assert captured_default["api_key"] == "" + schema = captured["schema"] + assert hasattr(schema, "schema") + api_selector = schema.schema[CONF_API_KEY] + assert isinstance(api_selector, cf.TextSelector) + assert api_selector.config.type == cf.TextSelectorType.PASSWORD + + +def test_validate_input_update_interval_below_min_sets_error( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Sub-1 update intervals should surface a field error and skip I/O.""" + + session = _patch_client_session(monkeypatch, _StubResponse(200)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + + user_input = {**_base_user_input(), CONF_UPDATE_INTERVAL: 0} + + errors, normalized = asyncio.run( + flow._async_validate_input(user_input, check_unique_id=False) + ) + + assert errors == {CONF_UPDATE_INTERVAL: "invalid_update_interval"} + assert normalized is None + assert not session.calls + + +def test_validate_input_update_interval_float_string( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Float-like strings should coerce to int and allow validation to proceed.""" + + session = _patch_client_session( + monkeypatch, _StubResponse(200, b'{"dailyInfo": [{"indexInfo": []}]}') + ) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + + user_input = {**_base_user_input(), CONF_UPDATE_INTERVAL: "1.0"} + + errors, normalized = asyncio.run( + flow._async_validate_input(user_input, check_unique_id=False) + ) + + assert errors == {} + assert normalized is not None + assert normalized[CONF_UPDATE_INTERVAL] == 1 + assert session.calls + + +def test_validate_input_update_interval_non_numeric_sets_error( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Non-numeric update intervals should surface a field error and skip I/O.""" + + session = _patch_client_session(monkeypatch, _StubResponse(200)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + + user_input = {**_base_user_input(), 
CONF_UPDATE_INTERVAL: "abc"} + + errors, normalized = asyncio.run( + flow._async_validate_input(user_input, check_unique_id=False) + ) + + assert errors == {CONF_UPDATE_INTERVAL: "invalid_update_interval"} + assert normalized is None + assert not session.calls + + +@pytest.mark.parametrize( + ("status", "expected"), + [ + (401, {"base": "invalid_auth"}), + (403, {"base": "cannot_connect"}), + ], +) +def test_validate_input_http_auth_errors_map_correctly( + monkeypatch: pytest.MonkeyPatch, status: int, expected: dict +) -> None: + """HTTP auth failures during validation should map correctly.""" + + session = _patch_client_session(monkeypatch, _StubResponse(status)) flow = PollenLevelsConfigFlow() flow.hass = SimpleNamespace() @@ -448,7 +805,7 @@ def test_validate_input_http_403_sets_invalid_auth( ) assert session.calls - assert errors == {"base": "invalid_auth"} + assert errors == expected assert normalized is None @@ -515,6 +872,211 @@ def test_validate_input_http_500_sets_error_message_placeholder( assert placeholders.get("error_message") +def test_validate_input_clears_error_message_placeholder_on_validation_error( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Field-level validation errors should clear stale error_message placeholders.""" + + session = _patch_client_session(monkeypatch, _StubResponse(500)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + placeholders: dict[str, str] = {} + + errors, normalized = asyncio.run( + flow._async_validate_input( + _base_user_input(), + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert session.calls + assert errors == {"base": "cannot_connect"} + assert normalized is None + assert placeholders.get("error_message") + + errors, normalized = asyncio.run( + flow._async_validate_input( + {**_base_user_input(), CONF_LANGUAGE_CODE: "bad code"}, + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert errors == {CONF_LANGUAGE_CODE: 
"invalid_language_format"} + assert normalized is None + assert "error_message" not in placeholders + + +def test_validate_input_invalid_option_combo_clears_error_message_placeholder( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """invalid_option_combo should clear stale error_message placeholders.""" + + session = _patch_client_session(monkeypatch, _StubResponse(500)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + placeholders: dict[str, str] = {} + + errors, normalized = asyncio.run( + flow._async_validate_input( + _base_user_input(), + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert session.calls + assert errors == {"base": "cannot_connect"} + assert normalized is None + assert placeholders.get("error_message") + + _patch_client_session( + monkeypatch, _StubResponse(200, b'{"dailyInfo": [{"day": "D0"}]}') + ) + + errors, normalized = asyncio.run( + flow._async_validate_input( + { + **_base_user_input(), + CONF_FORECAST_DAYS: 1, + CONF_CREATE_FORECAST_SENSORS: "D+1", + }, + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert errors == {CONF_CREATE_FORECAST_SENSORS: "invalid_option_combo"} + assert normalized is None + assert "error_message" not in placeholders + + +def test_validate_input_http_403_sets_error_message_placeholder( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """HTTP 403 should populate the cannot_connect error_message placeholder.""" + + body = b'{"error": {"message": "Forbidden for this project"}}' + session = _patch_client_session(monkeypatch, _StubResponse(status=403, body=body)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + placeholders: dict[str, str] = {} + + errors, normalized = asyncio.run( + flow._async_validate_input( + _base_user_input(), + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert session.calls + assert errors == {"base": "cannot_connect"} + assert normalized is None + assert 
"Forbidden" in placeholders.get("error_message", "") + + +def test_validate_input_http_403_invalid_key_maps_to_invalid_auth( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """HTTP 403 invalid API key messages should behave like invalid_auth.""" + + body = b'{"error": {"message": "API key not valid. Please pass a valid API key."}}' + session = _patch_client_session(monkeypatch, _StubResponse(status=403, body=body)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + placeholders: dict[str, str] = {} + + errors, normalized = asyncio.run( + flow._async_validate_input( + _base_user_input(), + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert session.calls + assert errors == {"base": "invalid_auth"} + assert normalized is None + assert "api key not valid" in placeholders.get("error_message", "").lower() + + +def test_validate_input_redacts_api_key_in_error_message( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Error placeholders should redact API keys returned by the service.""" + + body = b'{"error": {"message": "API key test-key not valid"}}' + session = _patch_client_session(monkeypatch, _StubResponse(status=401, body=body)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + placeholders: dict[str, str] = {} + + user_input = _base_user_input() + user_input[cf.CONF_API_KEY] = "test-key" + + errors, normalized = asyncio.run( + flow._async_validate_input( + user_input, + check_unique_id=False, + description_placeholders=placeholders, + ) + ) + + assert session.calls + assert errors == {"base": "invalid_auth"} + assert normalized is None + error_message = placeholders.get("error_message", "") + assert "test-key" not in error_message + assert "***" in error_message + + +def test_validate_input_http_200_non_list_dailyinfo_sets_cannot_connect( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A non-list dailyInfo in HTTP 200 should be treated as invalid.""" + + body = b'{"dailyInfo": "invalid"}' 
+ session = _patch_client_session(monkeypatch, _StubResponse(status=200, body=body)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + + errors, normalized = asyncio.run( + flow._async_validate_input(_base_user_input(), check_unique_id=False) + ) + + assert session.calls + assert errors == {"base": "cannot_connect"} + assert normalized is None + + +def test_validate_input_http_200_dailyinfo_with_non_dict_sets_cannot_connect( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A dailyInfo list with non-dict items should be treated as invalid.""" + + body = b'{"dailyInfo": ["invalid-item"]}' + session = _patch_client_session(monkeypatch, _StubResponse(status=200, body=body)) + + flow = PollenLevelsConfigFlow() + flow.hass = SimpleNamespace() + + errors, normalized = asyncio.run( + flow._async_validate_input(_base_user_input(), check_unique_id=False) + ) + + assert session.calls + assert errors == {"base": "cannot_connect"} + assert normalized is None + + def test_validate_input_unexpected_exception_sets_unknown( monkeypatch: pytest.MonkeyPatch, ) -> None: @@ -542,7 +1104,8 @@ def test_validate_input_happy_path_sets_unique_id_and_normalizes( """Successful validation should normalize data and set unique ID.""" body = b'{"dailyInfo": [{"day": "D0"}]}' - session = _patch_client_session(monkeypatch, _StubResponse(200, body)) + session = _SequenceSession([_StubResponse(200, body), _StubResponse(200, body)]) + monkeypatch.setattr(cf, "async_get_clientsession", lambda hass: session) class _TrackingFlow(PollenLevelsConfigFlow): def __init__(self) -> None: @@ -582,6 +1145,55 @@ def _abort_if_unique_id_configured(self): assert flow.abort_calls == 1 +def test_validate_input_unique_id_collapses_nearby_locations_legacy_compat( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Unique-id format should match legacy 4-decimal duplicate detection.""" + + body = b'{"dailyInfo": [{"day": "D0"}]}' + session = _SequenceSession([_StubResponse(200, body), 
_StubResponse(200, body)]) + monkeypatch.setattr(cf, "async_get_clientsession", lambda hass: session) + + class _TrackingFlow(PollenLevelsConfigFlow): + def __init__(self) -> None: + super().__init__() + self.unique_ids: list[str] = [] + + async def async_set_unique_id(self, uid: str, raise_on_progress: bool = False): + self.unique_ids.append(uid) + return None + + def _abort_if_unique_id_configured(self): + return None + + flow = _TrackingFlow() + flow.hass = SimpleNamespace(config=SimpleNamespace()) + + first = { + **_base_user_input(), + CONF_LOCATION: {CONF_LATITUDE: "1.0000044", CONF_LONGITUDE: "2.0000044"}, + } + second = { + **_base_user_input(), + CONF_LOCATION: {CONF_LATITUDE: "1.0000046", CONF_LONGITUDE: "2.0000046"}, + } + + first_errors, first_normalized = asyncio.run( + flow._async_validate_input(first, check_unique_id=True) + ) + second_errors, second_normalized = asyncio.run( + flow._async_validate_input(second, check_unique_id=True) + ) + + assert session.calls + assert first_errors == {} + assert second_errors == {} + assert first_normalized is not None + assert second_normalized is not None + assert len(flow.unique_ids) == 2 + assert flow.unique_ids[0] == flow.unique_ids[1] == "1.0000_2.0000" + + def test_reauth_confirm_updates_and_reloads_entry() -> None: """Re-auth confirmation should update stored credentials and reload the entry.""" @@ -713,3 +1325,35 @@ async def fake_validate( assert result["title"] == DEFAULT_ENTRY_TITLE assert result["data"] == normalized + + +@pytest.mark.parametrize("raw", ["inf", "-inf", "nan"]) +def test_parse_int_option_non_finite_returns_error(raw: str) -> None: + """Non-finite numeric values should be rejected safely.""" + + parsed, err = cf._parse_int_option( + raw, + default=cf.DEFAULT_UPDATE_INTERVAL, + min_value=cf.MIN_UPDATE_INTERVAL_HOURS, + max_value=cf.MAX_UPDATE_INTERVAL_HOURS, + error_key="invalid_update_interval", + ) + + assert parsed == cf.DEFAULT_UPDATE_INTERVAL + assert err == 
"invalid_update_interval" + + +@pytest.mark.parametrize("raw", ["2.9", 2.1]) +def test_parse_int_option_decimal_returns_error(raw: object) -> None: + """Decimal values should be rejected for integer-only options.""" + + parsed, err = cf._parse_int_option( + raw, + default=cf.DEFAULT_UPDATE_INTERVAL, + min_value=cf.MIN_UPDATE_INTERVAL_HOURS, + max_value=cf.MAX_UPDATE_INTERVAL_HOURS, + error_key="invalid_update_interval", + ) + + assert parsed == cf.DEFAULT_UPDATE_INTERVAL + assert err == "invalid_update_interval" diff --git a/tests/test_diagnostics.py b/tests/test_diagnostics.py new file mode 100644 index 00000000..47a7070d --- /dev/null +++ b/tests/test_diagnostics.py @@ -0,0 +1,195 @@ +"""Diagnostics tests for privacy and payload sizing.""" + +from __future__ import annotations + +import datetime as dt +import sys +from types import ModuleType, SimpleNamespace +from typing import Any + +import pytest + + +def _force_module(name: str, module: ModuleType) -> None: + sys.modules[name] = module + + +components_mod = ModuleType("homeassistant.components") +diagnostics_mod = ModuleType("homeassistant.components.diagnostics") + + +def _async_redact_data(data: dict[str, Any], _redact: set[str]) -> dict[str, Any]: + def _walk(value): + if isinstance(value, dict): + return { + k: ("**REDACTED**" if k in _redact else _walk(v)) + for k, v in value.items() + } + if isinstance(value, list): + return [_walk(v) for v in value] + return value + + return _walk(data) + + +diagnostics_mod.async_redact_data = _async_redact_data +_force_module("homeassistant.components", components_mod) +_force_module("homeassistant.components.diagnostics", diagnostics_mod) + +config_entries_mod = ModuleType("homeassistant.config_entries") + + +class _ConfigEntry: + def __init__( + self, + *, + data: dict[str, Any], + options: dict[str, Any], + entry_id: str, + title: str, + ) -> None: + self.data = data + self.options = options + self.entry_id = entry_id + self.title = title + self.runtime_data = None 
+ + +config_entries_mod.ConfigEntry = _ConfigEntry +_force_module("homeassistant.config_entries", config_entries_mod) + +core_mod = ModuleType("homeassistant.core") + + +class _HomeAssistant: + pass + + +core_mod.HomeAssistant = _HomeAssistant +_force_module("homeassistant.core", core_mod) + +from custom_components.pollenlevels import diagnostics as diag # noqa: E402 +from custom_components.pollenlevels.const import ( # noqa: E402 + CONF_API_KEY, + CONF_FORECAST_DAYS, + CONF_LANGUAGE_CODE, + CONF_LATITUDE, + CONF_LONGITUDE, + DEFAULT_FORECAST_DAYS, + MAX_FORECAST_DAYS, + MIN_FORECAST_DAYS, +) +from custom_components.pollenlevels.runtime import PollenLevelsRuntimeData # noqa: E402 + + +@pytest.mark.asyncio +async def test_diagnostics_rounds_coordinates_and_truncates_keys() -> None: + """Diagnostics should use rounded coordinates and limit data_keys length.""" + + data = { + CONF_API_KEY: "secret-token", + CONF_LATITUDE: 12.3456, + CONF_LONGITUDE: 78.9876, + CONF_LANGUAGE_CODE: "en", + } + options = {CONF_FORECAST_DAYS: 3} + + entry = _ConfigEntry(data=data, options=options, entry_id="entry", title="Home") + + coordinator = SimpleNamespace( + entry_id="entry", + forecast_days=3, + language="en", + create_d1=True, + create_d2=False, + last_updated=dt.datetime(2025, 1, 1, tzinfo=dt.UTC), + data={f"type_{idx}": {} for idx in range(60)}, + ) + entry.runtime_data = PollenLevelsRuntimeData( + coordinator=coordinator, client=object() + ) + + diagnostics = await diag.async_get_config_entry_diagnostics(None, entry) + + assert diagnostics["request_params_example"]["key"] == "***" + assert CONF_LATITUDE not in diagnostics["entry"]["data"] + assert CONF_LONGITUDE not in diagnostics["entry"]["data"] + assert diagnostics["request_params_example"]["location.latitude"] == 12.3 + assert diagnostics["request_params_example"]["location.longitude"] == 79.0 + assert diagnostics["coordinator"]["data_keys_total"] == 60 + assert len(diagnostics["coordinator"]["data_keys"]) == 50 + + 
+@pytest.mark.asyncio +@pytest.mark.parametrize( + ("raw_days", "expected_days"), + [ + (999, MAX_FORECAST_DAYS), + (-3, MIN_FORECAST_DAYS), + ("nan", DEFAULT_FORECAST_DAYS), + ], +) +async def test_diagnostics_clamps_request_days( + raw_days: Any, expected_days: int +) -> None: + """Diagnostics request params should always show a supported day count.""" + + data = { + CONF_LATITUDE: 12.3, + CONF_LONGITUDE: 45.6, + CONF_LANGUAGE_CODE: "en", + } + options = {CONF_FORECAST_DAYS: raw_days} + + entry = _ConfigEntry(data=data, options=options, entry_id="entry", title="Home") + + coordinator = SimpleNamespace( + entry_id="entry", + forecast_days=3, + language="en", + create_d1=True, + create_d2=False, + last_updated=dt.datetime(2025, 1, 1, tzinfo=dt.UTC), + data={"type_grass": {"source": "type"}}, + ) + entry.runtime_data = PollenLevelsRuntimeData( + coordinator=coordinator, client=object() + ) + + diagnostics = await diag.async_get_config_entry_diagnostics(None, entry) + + assert diagnostics["request_params_example"]["days"] == expected_days + + +@pytest.mark.asyncio +async def test_diagnostics_nonfinite_coordinates_are_omitted_in_examples() -> None: + """Rounded coordinate helpers should drop non-finite values.""" + + data = { + CONF_LATITUDE: "nan", + CONF_LONGITUDE: float("inf"), + CONF_LANGUAGE_CODE: "en", + } + options = {CONF_FORECAST_DAYS: 2} + + entry = _ConfigEntry(data=data, options=options, entry_id="entry", title="Home") + + coordinator = SimpleNamespace( + entry_id="entry", + forecast_days=2, + language="en", + create_d1=True, + create_d2=False, + last_updated=dt.datetime(2025, 1, 1, tzinfo=dt.UTC), + data={"type_grass": {"source": "type"}}, + ) + entry.runtime_data = PollenLevelsRuntimeData( + coordinator=coordinator, client=object() + ) + + diagnostics = await diag.async_get_config_entry_diagnostics(None, entry) + + assert diagnostics["approximate_location"]["latitude_rounded"] is None + assert diagnostics["approximate_location"]["longitude_rounded"] is 
None + assert diagnostics["request_params_example"]["location.latitude"] is None + assert diagnostics["request_params_example"]["location.longitude"] is None diff --git a/tests/test_init.py b/tests/test_init.py index 4ac49a95..40788381 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -7,6 +7,7 @@ import sys import types from pathlib import Path +from typing import Any import pytest @@ -19,7 +20,9 @@ # Provide the additional stubs required by __init__. sys.modules.setdefault("homeassistant", types.ModuleType("homeassistant")) -core_mod = types.ModuleType("homeassistant.core") +core_mod = sys.modules.get("homeassistant.core") or types.ModuleType( + "homeassistant.core" +) class _StubHomeAssistant: # pragma: no cover - structure only @@ -32,7 +35,74 @@ class _StubServiceCall: # pragma: no cover - structure only core_mod.HomeAssistant = _StubHomeAssistant core_mod.ServiceCall = _StubServiceCall -sys.modules.setdefault("homeassistant.core", core_mod) +sys.modules["homeassistant.core"] = core_mod + +ha_components_mod = sys.modules.get("homeassistant.components") or types.ModuleType( + "homeassistant.components" +) +sys.modules["homeassistant.components"] = ha_components_mod + +sensor_mod = types.ModuleType("homeassistant.components.sensor") + + +class _StubSensorEntity: # pragma: no cover - structure only + def __init__(self, *args, **kwargs): + self._attr_unique_id = None + self._attr_device_info = None + + @property + def unique_id(self): + return getattr(self, "_attr_unique_id", None) + + @property + def device_info(self): + return getattr(self, "_attr_device_info", None) + + +class _StubSensorDeviceClass: # pragma: no cover - structure only + DATE = "date" + TIMESTAMP = "timestamp" + + +class _StubSensorStateClass: # pragma: no cover - structure only + MEASUREMENT = "measurement" + + +sensor_mod.SensorEntity = _StubSensorEntity +sensor_mod.SensorDeviceClass = _StubSensorDeviceClass +sensor_mod.SensorStateClass = _StubSensorStateClass 
+sys.modules.setdefault("homeassistant.components.sensor", sensor_mod) + +const_mod = sys.modules.get("homeassistant.const") or types.ModuleType( + "homeassistant.const" +) +const_mod.ATTR_ATTRIBUTION = "Attribution" +sys.modules["homeassistant.const"] = const_mod + +aiohttp_client_mod = types.ModuleType("homeassistant.helpers.aiohttp_client") +aiohttp_client_mod.async_get_clientsession = lambda _hass: None +sys.modules.setdefault("homeassistant.helpers.aiohttp_client", aiohttp_client_mod) + +aiohttp_mod = sys.modules.get("aiohttp") or types.ModuleType("aiohttp") + + +class _StubClientError(Exception): + pass + + +class _StubClientSession: # pragma: no cover - structure only + pass + + +class _StubClientTimeout: + def __init__(self, total: float | None = None): + self.total = total + + +aiohttp_mod.ClientError = _StubClientError +aiohttp_mod.ClientSession = _StubClientSession +aiohttp_mod.ClientTimeout = _StubClientTimeout +sys.modules["aiohttp"] = aiohttp_mod cv_mod = sys.modules["homeassistant.helpers.config_validation"] cv_mod.config_entry_only_config_schema = lambda _domain: lambda config: config @@ -41,6 +111,77 @@ class _StubServiceCall: # pragma: no cover - structure only if not hasattr(vol_mod, "Schema"): vol_mod.Schema = lambda *args, **kwargs: None +helpers_mod = sys.modules.get("homeassistant.helpers") or types.ModuleType( + "homeassistant.helpers" +) +sys.modules["homeassistant.helpers"] = helpers_mod + +entity_registry_mod = types.ModuleType("homeassistant.helpers.entity_registry") + + +def _stub_async_get(_hass): # pragma: no cover - structure only + class _Registry: + @staticmethod + def async_entries_for_config_entry(_registry, _entry_id): + return [] + + return _Registry() + + +entity_registry_mod.async_get = _stub_async_get +entity_registry_mod.async_entries_for_config_entry = lambda *args, **kwargs: [] +sys.modules.setdefault("homeassistant.helpers.entity_registry", entity_registry_mod) + +entity_mod = 
types.ModuleType("homeassistant.helpers.entity") + + +class _StubEntityCategory: + DIAGNOSTIC = "diagnostic" + + +entity_mod.EntityCategory = _StubEntityCategory +sys.modules.setdefault("homeassistant.helpers.entity", entity_mod) + +dt_mod = types.ModuleType("homeassistant.util.dt") + + +def _stub_utcnow(): + from datetime import UTC, datetime + + return datetime.now(UTC) + + +dt_mod.utcnow = _stub_utcnow + + +def _stub_parse_http_date(value: str | None): # pragma: no cover - stub only + from datetime import UTC, datetime + from email.utils import parsedate_to_datetime + + try: + parsed = parsedate_to_datetime(value) if value is not None else None + except (TypeError, ValueError, IndexError): + return None + + if parsed is None: + return None + + if parsed.tzinfo is None: + return parsed.replace(tzinfo=UTC) + + if isinstance(parsed, datetime): + return parsed + + return None + + +dt_mod.parse_http_date = _stub_parse_http_date +sys.modules.setdefault("homeassistant.util.dt", dt_mod) + +util_mod = types.ModuleType("homeassistant.util") +util_mod.dt = dt_mod +sys.modules.setdefault("homeassistant.util", util_mod) + exceptions_mod = sys.modules.setdefault( "homeassistant.exceptions", types.ModuleType("homeassistant.exceptions") ) @@ -57,9 +198,49 @@ class _StubConfigEntryAuthFailed(Exception): exceptions_mod.ConfigEntryAuthFailed = _StubConfigEntryAuthFailed +update_coordinator_mod = types.ModuleType("homeassistant.helpers.update_coordinator") + + +class _StubUpdateFailed(Exception): + pass + + +class _StubCoordinatorEntity: + def __init__(self, coordinator): + self.coordinator = coordinator + + +class _StubDataUpdateCoordinator: + def __init__(self, hass, logger, *, name: str, update_interval): + self.hass = hass + self.logger = logger + self.name = name + self.update_interval = update_interval + self.data = {"date": {}, "region": {}} + self.last_updated = None + + async def async_config_entry_first_refresh(self): + self.last_updated = "now" + return None + + async 
def async_refresh(self): + return None + + def async_request_refresh(self): # pragma: no cover - scheduling helper + return asyncio.create_task(self.async_refresh()) + + +update_coordinator_mod.DataUpdateCoordinator = _StubDataUpdateCoordinator +update_coordinator_mod.UpdateFailed = _StubUpdateFailed +update_coordinator_mod.CoordinatorEntity = _StubCoordinatorEntity +sys.modules.setdefault( + "homeassistant.helpers.update_coordinator", update_coordinator_mod +) + integration = importlib.import_module( "custom_components.pollenlevels.__init__" ) # noqa: E402 +const = importlib.import_module("custom_components.pollenlevels.const") # noqa: E402 class _FakeConfigEntries: @@ -67,12 +248,14 @@ def __init__( self, forward_exception: Exception | None = None, unload_result: bool = True, + entries: list[object] | None = None, ): self._forward_exception = forward_exception self._unload_result = unload_result self.forward_calls: list[tuple[object, list[str]]] = [] self.unload_calls: list[tuple[object, list[str]]] = [] self.reload_calls: list[str] = [] + self._entries = entries or [] async def async_forward_entry_setups(self, entry, platforms): self.forward_calls.append((entry, platforms)) @@ -83,15 +266,47 @@ async def async_unload_platforms(self, entry, platforms): self.unload_calls.append((entry, platforms)) return self._unload_result + def async_update_entry(self, entry, **kwargs): + if "data" in kwargs: + entry.data = kwargs["data"] + if "options" in kwargs: + entry.options = kwargs["options"] + if "version" in kwargs: + entry.version = kwargs["version"] + async def async_reload(self, entry_id: str): # pragma: no cover - used in tests self.reload_calls.append(entry_id) + def async_entries(self, domain: str | None = None): + if domain is None: + return list(self._entries) + return [ + entry for entry in self._entries if getattr(entry, "domain", None) == domain + ] + class _FakeEntry: - def __init__(self, *, entry_id: str = "entry-1", title: str = "Pollen Levels"): + def 
__init__( + self, + *, + entry_id: str = "entry-1", + title: str = "Pollen Levels", + data: dict | None = None, + options: dict | None = None, + version: int = 1, + ): self.entry_id = entry_id self.title = title + self.domain = integration.DOMAIN self._update_listener = None + self.data = data or { + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + } + self.options = options or {} + self.version = version + self.runtime_data = None def add_update_listener(self, listener): self._update_listener = listener @@ -104,9 +319,29 @@ def async_on_unload(self, callback): class _FakeHass: - def __init__(self, *, forward_exception: Exception | None = None): - self.config_entries = _FakeConfigEntries(forward_exception) + def __init__( + self, + *, + forward_exception: Exception | None = None, + entries: list[object] | None = None, + ): + self.config_entries = _FakeConfigEntries( + forward_exception=forward_exception, unload_result=True, entries=entries + ) self.data = {} + self.services = _ServiceRegistry() + + +class _ServiceRegistry: + def __init__(self): + self.registered: dict[tuple[str, str], Any] = {} + + def async_register(self, domain: str, service: str, handler, schema=None): + self.registered[(domain, service)] = handler + + async def async_call(self, domain: str, service: str): + handler = self.registered[(domain, service)] + await handler(_StubServiceCall()) def test_setup_entry_propagates_auth_failed() -> None: @@ -119,6 +354,168 @@ def test_setup_entry_propagates_auth_failed() -> None: asyncio.run(integration.async_setup_entry(hass, entry)) +def test_setup_entry_clears_runtime_data_on_forward_auth_failed() -> None: + """runtime_data is cleared when forwarding raises ConfigEntryAuthFailed.""" + + hass = _FakeHass(forward_exception=integration.ConfigEntryAuthFailed("bad key")) + entry = _FakeEntry() + + with pytest.raises(integration.ConfigEntryAuthFailed): + asyncio.run(integration.async_setup_entry(hass, 
entry)) + + assert entry.runtime_data is None + + +def test_setup_entry_clears_runtime_data_on_forward_not_ready() -> None: + """runtime_data is cleared when forwarding raises ConfigEntryNotReady.""" + + hass = _FakeHass(forward_exception=integration.ConfigEntryNotReady("retry")) + entry = _FakeEntry() + + with pytest.raises(integration.ConfigEntryNotReady): + asyncio.run(integration.async_setup_entry(hass, entry)) + + assert entry.runtime_data is None + + +def test_setup_entry_clears_runtime_data_on_forward_generic_error() -> None: + """runtime_data is cleared when forwarding raises an unexpected exception.""" + + class _Boom(Exception): + pass + + hass = _FakeHass(forward_exception=_Boom("boom")) + entry = _FakeEntry() + + with pytest.raises(integration.ConfigEntryNotReady): + asyncio.run(integration.async_setup_entry(hass, entry)) + + assert entry.runtime_data is None + + +def test_setup_entry_missing_api_key_raises_auth_failed() -> None: + """Missing API key should trigger ConfigEntryAuthFailed.""" + + hass = _FakeHass() + entry = _FakeEntry( + data={ + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + } + ) + + with pytest.raises(integration.ConfigEntryAuthFailed): + asyncio.run(integration.async_setup_entry(hass, entry)) + + +def test_setup_entry_whitespace_api_key_raises_auth_failed() -> None: + """Whitespace-only API key should trigger ConfigEntryAuthFailed.""" + + hass = _FakeHass() + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: " ", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + } + ) + + with pytest.raises(integration.ConfigEntryAuthFailed): + asyncio.run(integration.async_setup_entry(hass, entry)) + + +def test_setup_entry_invalid_coordinates_raise_not_ready() -> None: + """Invalid coordinates should trigger ConfigEntryNotReady.""" + + hass = _FakeHass() + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: "not-a-number", + integration.CONF_LONGITUDE: 2.0, + 
} + ) + + with pytest.raises(integration.ConfigEntryNotReady): + asyncio.run(integration.async_setup_entry(hass, entry)) + + +def test_setup_entry_nonfinite_or_out_of_range_coordinates_raise_not_ready() -> None: + """Non-finite or out-of-range coordinates should trigger ConfigEntryNotReady.""" + + bad_pairs = [ + (float("inf"), 2.0), + (1.0, float("nan")), + (91.0, 2.0), + (1.0, 181.0), + ] + + for lat, lon in bad_pairs: + hass = _FakeHass() + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: lat, + integration.CONF_LONGITUDE: lon, + } + ) + + with pytest.raises(integration.ConfigEntryNotReady): + asyncio.run(integration.async_setup_entry(hass, entry)) + + +def test_setup_entry_boundary_coordinates_are_allowed() -> None: + """Coordinate values on valid boundaries should still set up successfully.""" + + for lat, lon in [(-90.0, -180.0), (90.0, 180.0)]: + hass = _FakeHass() + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: lat, + integration.CONF_LONGITUDE: lon, + } + ) + + assert asyncio.run(integration.async_setup_entry(hass, entry)) is True + + +def test_setup_entry_decimal_numeric_options_fallback_to_defaults( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Decimal options should not be truncated silently during setup.""" + + hass = _FakeHass() + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + }, + options={ + integration.CONF_UPDATE_INTERVAL: 2.5, + integration.CONF_FORECAST_DAYS: 3.1, + }, + ) + + seen: dict[str, int] = {} + + class _StubCoordinator(update_coordinator_mod.DataUpdateCoordinator): + def __init__(self, *args, **kwargs): + seen["hours"] = kwargs["hours"] + seen["forecast_days"] = kwargs["forecast_days"] + self.data = {"region": {"source": "meta"}, "date": {"source": "meta"}} + + async def async_config_entry_first_refresh(self): + return None + + 
monkeypatch.setattr(integration, "PollenDataUpdateCoordinator", _StubCoordinator) + + assert asyncio.run(integration.async_setup_entry(hass, entry)) is True + assert seen["hours"] == integration.DEFAULT_UPDATE_INTERVAL + assert seen["forecast_days"] == integration.DEFAULT_FORECAST_DAYS + + def test_setup_entry_wraps_generic_error() -> None: """Unexpected errors convert to ConfigEntryNotReady for retries.""" @@ -132,12 +529,44 @@ class _Boom(Exception): asyncio.run(integration.async_setup_entry(hass, entry)) -def test_setup_entry_success_and_unload() -> None: +def test_setup_entry_success_and_unload( + monkeypatch: pytest.MonkeyPatch, +) -> None: """Happy path should forward setup, register listener, and unload cleanly.""" hass = _FakeHass() entry = _FakeEntry() - hass.data[integration.DOMAIN] = {entry.entry_id: "coordinator"} + + class _StubClient: + def __init__(self, _session, _api_key): + self.session = _session + self.api_key = _api_key + + async def async_fetch_pollen_data(self, **_kwargs): + return {"region": {"source": "meta"}, "dailyInfo": []} + + class _StubCoordinator(update_coordinator_mod.DataUpdateCoordinator): + def __init__(self, *args, **kwargs): + self.api_key = kwargs["api_key"] + self.lat = kwargs["lat"] + self.lon = kwargs["lon"] + self.forecast_days = kwargs["forecast_days"] + self.language = kwargs["language"] + self.create_d1 = kwargs["create_d1"] + self.create_d2 = kwargs["create_d2"] + self.entry_id = kwargs["entry_id"] + self.entry_title = kwargs.get("entry_title") + self.last_updated = None + self.data = {"region": {"source": "meta"}, "date": {"source": "meta"}} + + async def async_config_entry_first_refresh(self): + return None + + async def async_refresh(self): + return None + + monkeypatch.setattr(integration, "GooglePollenApiClient", _StubClient) + monkeypatch.setattr(integration, "PollenDataUpdateCoordinator", _StubCoordinator) assert asyncio.run(integration.async_setup_entry(hass, entry)) is True @@ -145,9 +574,295 @@ def 
test_setup_entry_success_and_unload() -> None: assert entry._update_listener is integration._update_listener # noqa: SLF001 assert entry._on_unload is entry._update_listener # noqa: SLF001 + assert entry.runtime_data is not None + assert entry.runtime_data.coordinator.entry_id == entry.entry_id + asyncio.run(entry._update_listener(hass, entry)) # noqa: SLF001 assert hass.config_entries.reload_calls == [entry.entry_id] assert asyncio.run(integration.async_unload_entry(hass, entry)) is True assert hass.config_entries.unload_calls == [(entry, ["sensor"])] - assert hass.data[integration.DOMAIN] == {} + assert entry.runtime_data is None + + +def test_setup_entry_normalizes_forecast_sensor_mode( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Setup should normalize stored forecast mode values before coordinator flags.""" + + hass = _FakeHass() + entry = _FakeEntry(options={integration.CONF_CREATE_FORECAST_SENSORS: " D+1 "}) + + class _StubClient: + def __init__(self, _session, _api_key): + self.session = _session + self.api_key = _api_key + + async def async_fetch_pollen_data(self, **_kwargs): + return {"region": {"source": "meta"}, "dailyInfo": []} + + class _StubCoordinator(update_coordinator_mod.DataUpdateCoordinator): + def __init__(self, *args, **kwargs): + self.create_d1 = kwargs["create_d1"] + self.create_d2 = kwargs["create_d2"] + self.entry_id = kwargs["entry_id"] + self.entry_title = kwargs.get("entry_title") + self.lat = kwargs["lat"] + self.lon = kwargs["lon"] + self.last_updated = None + self.data = {"region": {"source": "meta"}, "date": {"source": "meta"}} + + async def async_config_entry_first_refresh(self): + return None + + monkeypatch.setattr(integration, "GooglePollenApiClient", _StubClient) + monkeypatch.setattr(integration, "PollenDataUpdateCoordinator", _StubCoordinator) + + assert asyncio.run(integration.async_setup_entry(hass, entry)) is True + assert entry.runtime_data is not None + assert entry.runtime_data.coordinator.create_d1 is True + 
assert entry.runtime_data.coordinator.create_d2 is False + + +def test_setup_entry_disables_d1_when_forecast_days_is_one( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Setup should disable D+1/D+2 creation when forecast days disallow them.""" + + hass = _FakeHass() + entry = _FakeEntry( + options={ + integration.CONF_CREATE_FORECAST_SENSORS: "D+1+2", + integration.CONF_FORECAST_DAYS: 1, + } + ) + + class _StubClient: + def __init__(self, _session, _api_key): + self.session = _session + self.api_key = _api_key + + async def async_fetch_pollen_data(self, **_kwargs): + return {"region": {"source": "meta"}, "dailyInfo": []} + + class _StubCoordinator(update_coordinator_mod.DataUpdateCoordinator): + def __init__(self, *args, **kwargs): + self.create_d1 = kwargs["create_d1"] + self.create_d2 = kwargs["create_d2"] + self.entry_id = kwargs["entry_id"] + self.entry_title = kwargs.get("entry_title") + self.lat = kwargs["lat"] + self.lon = kwargs["lon"] + self.last_updated = None + self.data = {"region": {"source": "meta"}, "date": {"source": "meta"}} + + async def async_config_entry_first_refresh(self): + return None + + monkeypatch.setattr(integration, "GooglePollenApiClient", _StubClient) + monkeypatch.setattr(integration, "PollenDataUpdateCoordinator", _StubCoordinator) + + assert asyncio.run(integration.async_setup_entry(hass, entry)) is True + assert entry.runtime_data is not None + assert entry.runtime_data.coordinator.create_d1 is False + assert entry.runtime_data.coordinator.create_d2 is False + + +def test_force_update_requests_refresh_per_entry() -> None: + """force_update should queue refresh via runtime_data coordinators and skip missing runtime data.""" + + class _StubCoordinator: + def __init__(self): + self.calls: list[str] = [] + self.done = asyncio.Event() + + async def _mark(self): + self.calls.append("refresh") + self.done.set() + + async def async_request_refresh(self): + await self._mark() + + entry1 = _FakeEntry(entry_id="entry-1") + 
entry1.runtime_data = types.SimpleNamespace(coordinator=_StubCoordinator()) + entry2 = _FakeEntry(entry_id="entry-2") + entry2.runtime_data = types.SimpleNamespace(coordinator=_StubCoordinator()) + entry3 = _FakeEntry(entry_id="entry-3") + entry3.runtime_data = None + + hass = _FakeHass(entries=[entry1, entry2, entry3]) + + assert asyncio.run(integration.async_setup(hass, {})) is True + assert (integration.DOMAIN, "force_update") in hass.services.registered + + asyncio.run(hass.services.async_call(integration.DOMAIN, "force_update")) + + assert entry1.runtime_data.coordinator.calls == ["refresh"] + assert entry2.runtime_data.coordinator.calls == ["refresh"] + assert entry1.runtime_data.coordinator.done.is_set() + assert entry2.runtime_data.coordinator.done.is_set() + + +def test_migrate_entry_moves_mode_to_options() -> None: + """Migration should copy per-day sensor mode from data to options.""" + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + integration.CONF_CREATE_FORECAST_SENSORS: "D+1", + "http_referer": "https://legacy.example.com", + }, + options={"http_referer": "https://legacy.example.com"}, + version=1, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert entry.options[integration.CONF_CREATE_FORECAST_SENSORS] == "D+1" + assert integration.CONF_CREATE_FORECAST_SENSORS not in entry.data + assert "http_referer" not in entry.data + assert "http_referer" not in entry.options + assert entry.version == 3 + + +def test_migrate_entry_normalizes_invalid_mode() -> None: + """Migration should normalize invalid per-day sensor mode values.""" + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + integration.CONF_CREATE_FORECAST_SENSORS: "bad-value", + }, + options={}, + version=1, + ) + hass = _FakeHass(entries=[entry]) + + 
assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert ( + entry.options[integration.CONF_CREATE_FORECAST_SENSORS] + == const.FORECAST_SENSORS_CHOICES[0] + ) + assert entry.version == 3 + + +def test_migrate_entry_normalizes_invalid_mode_in_options() -> None: + """Migration should normalize invalid per-day sensor mode values in options.""" + entry = _FakeEntry( + data={}, + options={integration.CONF_CREATE_FORECAST_SENSORS: "bad-value"}, + version=1, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert ( + entry.options[integration.CONF_CREATE_FORECAST_SENSORS] + == const.FORECAST_SENSORS_CHOICES[0] + ) + assert entry.version == 3 + + +def test_migrate_entry_normalizes_invalid_mode_in_options_when_version_current() -> ( + None +): + """Migration should normalize invalid mode values even at the target version.""" + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + }, + options={integration.CONF_CREATE_FORECAST_SENSORS: "invalid-value"}, + version=integration.TARGET_ENTRY_VERSION, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert entry.options[integration.CONF_CREATE_FORECAST_SENSORS] == "none" + assert entry.version == integration.TARGET_ENTRY_VERSION + + +def test_migrate_entry_marks_version_when_no_changes() -> None: + """Migration should still bump the version when no changes are needed.""" + entry = _FakeEntry( + options={integration.CONF_CREATE_FORECAST_SENSORS: "D+1"}, + version=1, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert entry.version == 3 + + +def test_migrate_entry_cleans_legacy_keys_when_version_current() -> None: + """Migration should remove legacy keys even if already at target version.""" + entry = _FakeEntry( + 
data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + integration.CONF_CREATE_FORECAST_SENSORS: "D+1", + "http_referer": "https://legacy.example.com", + }, + options={"http_referer": "https://legacy.example.com"}, + version=integration.TARGET_ENTRY_VERSION, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert "http_referer" not in entry.data + assert "http_referer" not in entry.options + assert integration.CONF_CREATE_FORECAST_SENSORS not in entry.data + assert entry.version == integration.TARGET_ENTRY_VERSION + + +def test_migrate_entry_does_not_downgrade_version() -> None: + """Migration should preserve versions newer than the target.""" + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + "http_referer": "https://legacy.example.com", + }, + options={"http_referer": "https://legacy.example.com"}, + version=integration.TARGET_ENTRY_VERSION + 1, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert "http_referer" not in entry.data + assert "http_referer" not in entry.options + assert entry.version == integration.TARGET_ENTRY_VERSION + 1 + + +def test_migrate_entry_removes_mode_from_data_when_in_options() -> None: + """Migration should remove per-day sensor mode from data when already in options.""" + entry = _FakeEntry( + data={ + integration.CONF_API_KEY: "key", + integration.CONF_LATITUDE: 1.0, + integration.CONF_LONGITUDE: 2.0, + integration.CONF_CREATE_FORECAST_SENSORS: "D+1", + }, + options={integration.CONF_CREATE_FORECAST_SENSORS: "D+1"}, + version=1, + ) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert integration.CONF_CREATE_FORECAST_SENSORS not in entry.data + assert 
entry.options[integration.CONF_CREATE_FORECAST_SENSORS] == "D+1" + + +@pytest.mark.parametrize("version", [None, "x"]) +def test_migrate_entry_handles_non_int_version(version: object) -> None: + """Migration should normalize non-integer versions before bumping.""" + entry = _FakeEntry(options={}, version=version) + hass = _FakeHass(entries=[entry]) + + assert asyncio.run(integration.async_migrate_entry(hass, entry)) is True + assert entry.version == 3 diff --git a/tests/test_options_flow.py b/tests/test_options_flow.py index 81afa947..f1be3852 100644 --- a/tests/test_options_flow.py +++ b/tests/test_options_flow.py @@ -15,6 +15,12 @@ CONF_LATITUDE, CONF_LONGITUDE, CONF_UPDATE_INTERVAL, + DEFAULT_FORECAST_DAYS, + DEFAULT_UPDATE_INTERVAL, + FORECAST_SENSORS_CHOICES, + MAX_FORECAST_DAYS, + MAX_UPDATE_INTERVAL_HOURS, + MIN_FORECAST_DAYS, ) from tests import test_config_flow as base @@ -79,7 +85,7 @@ def test_options_flow_forecast_days_below_min_sets_error() -> None: ) assert result["errors"] == { - CONF_FORECAST_DAYS: "invalid_option_combo", + CONF_FORECAST_DAYS: "invalid_forecast_days", CONF_CREATE_FORECAST_SENSORS: "invalid_option_combo", } @@ -128,3 +134,140 @@ def test_options_flow_valid_submission_returns_entry_data() -> None: CONF_LANGUAGE_CODE: "es", }, } + + +def test_options_flow_update_interval_below_min_sets_error() -> None: + """Sub-1 update intervals should raise a field error.""" + + flow = _flow() + + result = asyncio.run( + flow.async_step_init( + { + CONF_LANGUAGE_CODE: "en", + CONF_FORECAST_DAYS: 2, + CONF_CREATE_FORECAST_SENSORS: "none", + CONF_UPDATE_INTERVAL: 0, + } + ) + ) + + assert result["errors"] == {CONF_UPDATE_INTERVAL: "invalid_update_interval"} + + +def test_options_flow_invalid_update_interval_short_circuits() -> None: + """Invalid update interval should short-circuit without extra errors.""" + + flow = _flow() + + result = asyncio.run( + flow.async_step_init( + { + CONF_LANGUAGE_CODE: "en", + CONF_FORECAST_DAYS: 0, + 
CONF_CREATE_FORECAST_SENSORS: "D+1+2", + CONF_UPDATE_INTERVAL: "not-a-number", + } + ) + ) + + assert result["errors"] == {CONF_UPDATE_INTERVAL: "invalid_update_interval"} + + +def test_options_flow_update_interval_above_max_sets_error() -> None: + """Over-max update intervals should raise a field error.""" + + flow = _flow() + + result = asyncio.run( + flow.async_step_init( + { + CONF_LANGUAGE_CODE: "en", + CONF_FORECAST_DAYS: 2, + CONF_CREATE_FORECAST_SENSORS: "none", + CONF_UPDATE_INTERVAL: 999, + } + ) + ) + + assert result["errors"] == {CONF_UPDATE_INTERVAL: "invalid_update_interval"} + + +@pytest.mark.parametrize( + ("raw_value", "expected"), + [ + ("not-a-number", DEFAULT_UPDATE_INTERVAL), + (0, 1), + (999, MAX_UPDATE_INTERVAL_HOURS), + ], +) +def test_options_schema_update_interval_default_is_sanitized( + monkeypatch: pytest.MonkeyPatch, + raw_value: object, + expected: int, +) -> None: + """Options form should clamp invalid update interval defaults.""" + + captured_defaults: list[int | None] = [] + + def _capture_optional(key, **kwargs): + if key == CONF_UPDATE_INTERVAL: + captured_defaults.append(kwargs.get("default")) + return key + + monkeypatch.setattr(base.cf.vol, "Optional", _capture_optional) + + flow = _flow(options={CONF_UPDATE_INTERVAL: raw_value}) + asyncio.run(flow.async_step_init(user_input=None)) + + assert captured_defaults == [expected] + + +@pytest.mark.parametrize( + ("raw_value", "expected"), + [ + (0, str(MIN_FORECAST_DAYS)), + (999, str(MAX_FORECAST_DAYS)), + ("abc", str(DEFAULT_FORECAST_DAYS)), + ], +) +def test_options_schema_forecast_days_default_is_sanitized( + monkeypatch: pytest.MonkeyPatch, + raw_value: object, + expected: str, +) -> None: + """Options form should clamp invalid forecast day defaults.""" + + captured_defaults: list[str | None] = [] + + def _capture_optional(key, **kwargs): + if key == CONF_FORECAST_DAYS: + captured_defaults.append(kwargs.get("default")) + return key + + monkeypatch.setattr(base.cf.vol, 
"Optional", _capture_optional) + + flow = _flow(options={CONF_FORECAST_DAYS: raw_value}) + asyncio.run(flow.async_step_init(user_input=None)) + + assert captured_defaults == [expected] + + +def test_options_schema_sensor_mode_default_is_sanitized( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Options form should fall back to a valid sensor mode default.""" + + captured_defaults: list[str | None] = [] + + def _capture_optional(key, **kwargs): + if key == CONF_CREATE_FORECAST_SENSORS: + captured_defaults.append(kwargs.get("default")) + return key + + monkeypatch.setattr(base.cf.vol, "Optional", _capture_optional) + + flow = _flow(options={CONF_CREATE_FORECAST_SENSORS: "bad"}) + asyncio.run(flow.async_step_init(user_input=None)) + + assert captured_defaults == [FORECAST_SENSORS_CHOICES[0]] diff --git a/tests/test_sensor.py b/tests/test_sensor.py index e00a2740..82b10c1e 100644 --- a/tests/test_sensor.py +++ b/tests/test_sensor.py @@ -3,6 +3,7 @@ from __future__ import annotations import asyncio +import datetime import importlib.util import sys import types @@ -36,7 +37,17 @@ class _StubSensorEntity: # pragma: no cover - no runtime behavior needed - pass + def __init__(self, *args, **kwargs): + self._attr_unique_id = None + self._attr_device_info: dict[str, Any] | None = None + + @property + def unique_id(self): + return getattr(self, "_attr_unique_id", None) + + @property + def device_info(self): + return getattr(self, "_attr_device_info", None) class _StubSensorDeviceClass: @@ -187,27 +198,61 @@ def _stub_utcnow(): dt_mod.utcnow = _stub_utcnow + + +def _stub_parse_http_date(value: str | None): # pragma: no cover - stub only + from datetime import UTC, datetime + from email.utils import parsedate_to_datetime + + try: + parsed = parsedate_to_datetime(value) if value is not None else None + except (TypeError, ValueError, IndexError): + return None + + if parsed is None: + return None + + if parsed.tzinfo is None: + return parsed.replace(tzinfo=UTC) + + if 
isinstance(parsed, datetime): + return parsed + + return None + + +dt_mod.parse_http_date = _stub_parse_http_date sys.modules.setdefault("homeassistant.util.dt", dt_mod) util_mod = types.ModuleType("homeassistant.util") util_mod.dt = dt_mod sys.modules.setdefault("homeassistant.util", util_mod) -aiohttp_mod = types.ModuleType("aiohttp") +aiohttp_existing = sys.modules.get("aiohttp") +aiohttp_mod = aiohttp_existing or types.ModuleType("aiohttp") class _StubClientError(Exception): pass +class _StubClientSession: # pragma: no cover - structure only + pass + + class _StubClientTimeout: def __init__(self, total: float | None = None): self.total = total -aiohttp_mod.ClientError = _StubClientError -aiohttp_mod.ClientTimeout = _StubClientTimeout -sys.modules.setdefault("aiohttp", aiohttp_mod) +if not hasattr(aiohttp_mod, "ClientError"): + aiohttp_mod.ClientError = _StubClientError +if not hasattr(aiohttp_mod, "ClientSession"): + aiohttp_mod.ClientSession = _StubClientSession +if not hasattr(aiohttp_mod, "ClientTimeout"): + aiohttp_mod.ClientTimeout = _StubClientTimeout +if aiohttp_existing is None: + sys.modules["aiohttp"] = aiohttp_mod def _load_module(module_name: str, relative_path: str): @@ -221,8 +266,12 @@ def _load_module(module_name: str, relative_path: str): return module -_load_module("custom_components.pollenlevels.const", "const.py") +const = _load_module("custom_components.pollenlevels.const", "const.py") +coordinator_mod = _load_module( + "custom_components.pollenlevels.coordinator", "coordinator.py" +) sensor = _load_module("custom_components.pollenlevels.sensor", "sensor.py") +client_mod = importlib.import_module("custom_components.pollenlevels.client") class DummyHass: @@ -246,6 +295,7 @@ def __init__( self.data = data self.options = options or {} self.entry_id = entry_id + self.runtime_data = None class FakeResponse: @@ -304,40 +354,52 @@ def __init__(self, sequence: list[ResponseSpec | Exception]): self.calls = 0 def get(self, *_args, **_kwargs): + 
"""Return the next fake response in the sequence.""" + if self.calls >= len(self.sequence): raise AssertionError( - "SequenceSession exhausted; no more responses " - f"(calls={self.calls}, sequence_len={len(self.sequence)})." + "SequenceSession exhausted; no more responses configured" ) + item = self.sequence[self.calls] self.calls += 1 if isinstance(item, Exception): raise item - return FakeResponse( - item.payload, status=item.status, headers=item.headers or {} - ) + return FakeResponse(item.payload, status=item.status, headers=item.headers) -class RegistryEntry: - """Simple stub representing an Entity Registry entry.""" +class RegistryEntry(NamedTuple): + """Entity registry entry stub.""" - def __init__(self, unique_id: str, entity_id: str) -> None: - self.unique_id = unique_id - self.entity_id = entity_id - self.domain = "sensor" - self.platform = sensor.DOMAIN + entity_id: str + unique_id: str + domain: str + platform: str class RegistryStub: - """Minimal async Entity Registry stub capturing removals.""" + """Stubbed entity registry that records removals.""" - def __init__(self, entries: list[RegistryEntry]) -> None: - self.entries = entries + def __init__(self, entries: list[RegistryEntry], entry_id: str) -> None: + self._entries = entries + self._entry_id = entry_id self.removals: list[str] = [] - async def async_remove(self, entity_id: str) -> None: + def async_entries_for_config_entry(self, _registry, entry_id: str): + assert entry_id == self._entry_id + return [ + types.SimpleNamespace( + entity_id=e.entity_id, + unique_id=e.unique_id, + domain=e.domain, + platform=e.platform, + ) + for e in self._entries + ] + + def async_remove(self, entity_id: str) -> None: self.removals.append(entity_id) @@ -347,13 +409,17 @@ def _setup_registry_stub( *, entry_id: str, ) -> RegistryStub: - registry = RegistryStub(entries) + """Patch the sensor module's entity registry helpers for cleanup tests.""" - monkeypatch.setattr(sensor.er, "async_get", lambda _hass: registry) + 
registry = RegistryStub(entries, entry_id=entry_id) + + # In Home Assistant, `async_remove()` is a method of the registry object returned by + # `entity_registry.async_get(hass)`, not a module-level function. + monkeypatch.setattr(sensor.er, "async_get", lambda hass: registry) monkeypatch.setattr( sensor.er, "async_entries_for_config_entry", - lambda reg, eid: entries if reg is registry and eid == entry_id else [], + registry.async_entries_for_config_entry, ) return registry @@ -388,11 +454,11 @@ def test_type_sensor_preserves_source_with_single_day( } fake_session = FakeSession(payload) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: fake_session) + client = client_mod.GooglePollenApiClient(fake_session, "test") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="test", lat=1.0, @@ -403,6 +469,7 @@ def test_type_sensor_preserves_source_with_single_day( forecast_days=1, create_d1=False, create_d2=False, + client=client, ) try: @@ -417,6 +484,352 @@ def test_type_sensor_preserves_source_with_single_day( assert entry["forecast"] == [] assert entry["tomorrow_has_index"] is False assert entry["tomorrow_value"] is None + assert "color_raw" not in entry + + +def test_coordinator_preserves_last_data_when_dailyinfo_missing() -> None: + """Missing dailyInfo keeps the last successful data instead of clearing.""" + + payload = { + "regionCode": "us_ca_san_francisco", + "dailyInfo": [ + { + "date": {"year": 2025, "month": 5, "day": 9}, + "pollenTypeInfo": [ + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": { + "value": 2, + "category": "LOW", + "indexDescription": "Low", + }, + } + ], + } + ], + } + + session = SequenceSession( + [ + ResponseSpec(status=200, payload=payload), + ResponseSpec(status=200, payload={}), + ] + ) + client = client_mod.GooglePollenApiClient(session, "test") + + loop = 
asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + first_data = loop.run_until_complete(coordinator._async_update_data()) + coordinator.data = first_data + second_data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert first_data["type_grass"]["value"] == 2 + assert second_data == first_data + assert second_data == coordinator.data + + +def test_coordinator_clamps_forecast_days_low() -> None: + """Forecast days are clamped to minimum for legacy or invalid values.""" + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + client = client_mod.GooglePollenApiClient(FakeSession({}), "test") + + try: + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=0, + create_d1=False, + create_d2=False, + client=client, + ) + finally: + loop.close() + + assert coordinator.forecast_days == const.MIN_FORECAST_DAYS + + +def test_coordinator_first_refresh_missing_dailyinfo_raises() -> None: + """Missing dailyInfo on the first refresh should raise UpdateFailed.""" + + session = SequenceSession([ResponseSpec(status=200, payload={})]) + client = client_mod.GooglePollenApiClient(session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + with pytest.raises(client_mod.UpdateFailed, match="dailyInfo"): + loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert 
coordinator.data == {} + + +def test_coordinator_first_refresh_invalid_dailyinfo_type_raises() -> None: + """Non-list dailyInfo payload should raise UpdateFailed on first refresh.""" + + session = SequenceSession([ResponseSpec(status=200, payload={"dailyInfo": {}})]) + client = client_mod.GooglePollenApiClient(session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + with pytest.raises(client_mod.UpdateFailed, match="dailyInfo"): + loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + +def test_coordinator_invalid_dailyinfo_items_keep_last_data() -> None: + """Invalid dailyInfo items should preserve previous successful coordinator data.""" + + session = SequenceSession( + [ + ResponseSpec( + status=200, + payload={ + "dailyInfo": [ + { + "date": {"year": 2025, "month": 5, "day": 9}, + "pollenTypeInfo": [ + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": {"value": 2, "category": "LOW"}, + } + ], + } + ] + }, + ), + ResponseSpec(status=200, payload={"dailyInfo": ["bad-item"]}), + ] + ) + client = client_mod.GooglePollenApiClient(session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + first_data = loop.run_until_complete(coordinator._async_update_data()) + coordinator.data = first_data + second_data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert first_data["type_grass"]["value"] == 2 + assert second_data == first_data + + +def 
test_coordinator_mixed_dailyinfo_items_keep_last_data() -> None: + """Mixed valid/invalid dailyInfo items are treated as invalid payload.""" + + session = SequenceSession( + [ + ResponseSpec( + status=200, + payload={ + "dailyInfo": [ + { + "date": {"year": 2025, "month": 5, "day": 9}, + "pollenTypeInfo": [ + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": {"value": 2, "category": "LOW"}, + } + ], + } + ] + }, + ), + ResponseSpec( + status=200, + payload={ + "dailyInfo": [ + { + "date": {"year": 2025, "month": 5, "day": 10}, + "pollenTypeInfo": [], + }, + "bad-item", + ] + }, + ), + ] + ) + client = client_mod.GooglePollenApiClient(session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=2, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + first_data = loop.run_until_complete(coordinator._async_update_data()) + coordinator.data = first_data + second_data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert second_data == first_data + + +def test_coordinator_clamps_forecast_days_negative() -> None: + """Negative forecast days are clamped to minimum.""" + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + client = client_mod.GooglePollenApiClient(FakeSession({}), "test") + + try: + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=-5, + create_d1=False, + create_d2=False, + client=client, + ) + finally: + loop.close() + + assert coordinator.forecast_days == const.MIN_FORECAST_DAYS + + +def test_coordinator_clamps_forecast_days_high() -> None: + """Forecast days are clamped to maximum for legacy or invalid values.""" + + loop = asyncio.new_event_loop() + hass = 
DummyHass(loop) + client = client_mod.GooglePollenApiClient(FakeSession({}), "test") + + try: + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=10, + create_d1=False, + create_d2=False, + client=client, + ) + finally: + loop.close() + + assert coordinator.forecast_days == const.MAX_FORECAST_DAYS + + +def test_coordinator_keeps_forecast_days_within_range() -> None: + """Valid forecast days remain unchanged after initialization.""" + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + client = client_mod.GooglePollenApiClient(FakeSession({}), "test") + + try: + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=3, + create_d1=False, + create_d2=False, + client=client, + ) + finally: + loop.close() + + assert coordinator.forecast_days == 3 def test_type_sensor_uses_forecast_metadata_when_today_missing( @@ -469,11 +882,11 @@ def test_type_sensor_uses_forecast_metadata_when_today_missing( } fake_session = FakeSession(payload) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: fake_session) + client = client_mod.GooglePollenApiClient(fake_session, "test") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="test", lat=1.0, @@ -484,6 +897,7 @@ def test_type_sensor_uses_forecast_metadata_when_today_missing( forecast_days=5, create_d1=False, create_d2=False, + client=client, ) try: @@ -578,11 +992,11 @@ def test_plant_sensor_includes_forecast_attributes( } fake_session = FakeSession(payload) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: fake_session) + client = client_mod.GooglePollenApiClient(fake_session, "test") loop = 
asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="test", lat=1.0, @@ -593,6 +1007,7 @@ def test_plant_sensor_includes_forecast_attributes( forecast_days=5, create_d1=False, create_d2=False, + client=client, ) try: @@ -620,58 +1035,338 @@ def test_plant_sensor_includes_forecast_attributes( assert entry["expected_peak"]["value"] == 4 -@pytest.mark.parametrize( - ( - "allow_d1", - "allow_d2", - "expected_removed", - "expected_entities", - ), - [ - (False, True, 1, ["sensor.pollen_type_grass_d1"]), - (True, False, 1, ["sensor.pollen_type_grass_d2"]), - ], -) -def test_cleanup_per_day_entities_removes_disabled_days( - monkeypatch: pytest.MonkeyPatch, - allow_d1: bool, - allow_d2: bool, - expected_removed: int, - expected_entities: list[str], -) -> None: - """D+1/D+2 entities are awaited and removed when disabled.""" - - entries = [ - RegistryEntry("entry_type_grass", "sensor.pollen_type_grass"), - RegistryEntry("entry_type_grass_d1", "sensor.pollen_type_grass_d1"), - RegistryEntry("entry_type_grass_d2", "sensor.pollen_type_grass_d2"), - ] - registry = _setup_registry_stub(monkeypatch, entries, entry_id="entry") - - loop = asyncio.new_event_loop() - hass = DummyHass(loop) - try: - removed = loop.run_until_complete( - sensor._cleanup_per_day_entities( - hass, "entry", allow_d1=allow_d1, allow_d2=allow_d2 - ) - ) - finally: - loop.close() - - assert removed == expected_removed - assert registry.removals == expected_entities +def test_plant_forecast_matches_codes_case_insensitively() -> None: + """Plant forecast should match even when code casing varies by day.""" + payload = { + "dailyInfo": [ + { + "date": {"year": 2025, "month": 6, "day": 1}, + "plantInfo": [ + { + "code": "ragweed", + "displayName": "Ragweed", + "indexInfo": {"value": 2, "category": "LOW"}, + } + ], + }, + { + "date": {"year": 2025, "month": 6, "day": 2}, + "plantInfo": [ + 
{ + "code": "RAGWEED", + "displayName": "Ragweed", + "indexInfo": {"value": 4, "category": "HIGH"}, + } + ], + }, + ] + } + + fake_session = FakeSession(payload) + client = client_mod.GooglePollenApiClient(fake_session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=3, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + entry = data["plants_ragweed"] + assert entry["code"] == "ragweed" + assert entry["tomorrow_has_index"] is True + assert entry["tomorrow_value"] == 4 + + +def test_coordinator_accepts_numeric_string_color_channels() -> None: + """Numeric string channels should be normalized into RGB/hex values.""" + + payload = { + "dailyInfo": [ + { + "date": {"year": 2025, "month": 7, "day": 1}, + "pollenTypeInfo": [ + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": { + "value": 1, + "category": "LOW", + "color": {"red": "1", "green": "0", "blue": "0"}, + }, + } + ], + } + ] + } + + fake_session = FakeSession(payload) + client = client_mod.GooglePollenApiClient(fake_session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert data["type_grass"]["color_hex"] == "#FF0000" + assert data["type_grass"]["color_rgb"] == [255, 0, 0] + + +def test_coordinator_ignores_invalid_string_color_channels() -> None: + """Non-numeric string channels should not emit RGB/hex values.""" + + payload = 
{ + "dailyInfo": [ + { + "date": {"year": 2025, "month": 7, "day": 1}, + "pollenTypeInfo": [ + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": { + "value": 1, + "category": "LOW", + "color": {"red": "foo"}, + }, + } + ], + } + ] + } + + fake_session = FakeSession(payload) + client = client_mod.GooglePollenApiClient(fake_session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert data["type_grass"]["color_hex"] is None + assert data["type_grass"]["color_rgb"] is None -def test_coordinator_raises_auth_failed(monkeypatch: pytest.MonkeyPatch) -> None: - """A 403 response triggers ConfigEntryAuthFailed for re-auth flows.""" - fake_session = FakeSession({}, status=403) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: fake_session) +def test_coordinator_ignores_nonfinite_color_channels() -> None: + """Non-finite color channel values should not crash or emit invalid colors.""" + + payload = { + "dailyInfo": [ + { + "date": {"year": 2025, "month": 7, "day": 1}, + "pollenTypeInfo": [ + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": { + "value": 1, + "category": "LOW", + "color": {"red": float("inf"), "green": float("nan")}, + }, + } + ], + } + ] + } + + fake_session = FakeSession(payload) + client = client_mod.GooglePollenApiClient(fake_session, "test") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + 
create_d2=False, + client=client, + ) + + try: + data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert data["type_grass"]["color_hex"] is None + assert data["type_grass"]["color_rgb"] is None + + +def test_coordinator_type_keys_are_deterministic_sorted() -> None: + """Type sensor keys are emitted in stable sorted order.""" + + payload = { + "dailyInfo": [ + { + "date": {"year": 2025, "month": 7, "day": 1}, + "pollenTypeInfo": [ + { + "code": "WEED", + "displayName": "Weed", + "indexInfo": {"value": 2, "category": "LOW"}, + }, + { + "code": "GRASS", + "displayName": "Grass", + "indexInfo": {"value": 1, "category": "LOW"}, + }, + ], + } + ] + } + + fake_session = FakeSession(payload) + client = client_mod.GooglePollenApiClient(fake_session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + data = loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + type_keys = [ + k + for k, v in data.items() + if isinstance(v, dict) + and v.get("source") == "type" + and not k.endswith(("_d1", "_d2")) + ] + assert type_keys == sorted(type_keys) + + +@pytest.mark.parametrize( + ( + "allow_d1", + "allow_d2", + "expected_removed", + "expected_entities", + ), + [ + (False, True, 1, ["sensor.pollen_type_grass_d1"]), + (True, False, 1, ["sensor.pollen_type_grass_d2"]), + ], +) +def test_cleanup_per_day_entities_removes_disabled_days( + monkeypatch: pytest.MonkeyPatch, + allow_d1: bool, + allow_d2: bool, + expected_removed: int, + expected_entities: list[str], +) -> None: + """D+1/D+2 entities are awaited and removed when disabled.""" + + entries = [ + RegistryEntry( + "sensor.pollen_type_grass", + "entry_type_grass", + "sensor", + 
sensor.DOMAIN, + ), + RegistryEntry( + "sensor.pollen_type_grass_d1", + "entry_type_grass_d1", + "sensor", + sensor.DOMAIN, + ), + RegistryEntry( + "sensor.pollen_type_grass_d2", + "entry_type_grass_d2", + "sensor", + sensor.DOMAIN, + ), + ] + registry = _setup_registry_stub(monkeypatch, entries, entry_id="entry") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + try: + removed = loop.run_until_complete( + sensor._cleanup_per_day_entities( + hass, "entry", allow_d1=allow_d1, allow_d2=allow_d2 + ) + ) + finally: + loop.close() + + assert removed == expected_removed + assert registry.removals == expected_entities + + +def test_coordinator_raises_auth_failed() -> None: + """401 responses trigger ConfigEntryAuthFailed for re-auth flows.""" + + fake_session = FakeSession({}, status=401) + client = client_mod.GooglePollenApiClient(fake_session, "bad") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="bad", lat=1.0, @@ -682,10 +1377,70 @@ def test_coordinator_raises_auth_failed(monkeypatch: pytest.MonkeyPatch) -> None forecast_days=1, create_d1=False, create_d2=False, + client=client, ) try: - with pytest.raises(sensor.ConfigEntryAuthFailed): + with pytest.raises(client_mod.ConfigEntryAuthFailed): + loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + +def test_coordinator_handles_forbidden() -> None: + """403 responses raise UpdateFailed without triggering re-auth.""" + + fake_session = FakeSession({"error": {"message": "Forbidden"}}, status=403) + client = client_mod.GooglePollenApiClient(fake_session, "bad") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="bad", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + with 
pytest.raises(client_mod.UpdateFailed): + loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + +def test_coordinator_invalid_key_message_triggers_reauth() -> None: + """403 invalid API key messages should raise ConfigEntryAuthFailed.""" + + payload = {"error": {"message": "API key not valid. Please pass a valid API key."}} + fake_session = FakeSession(payload, status=403) + client = client_mod.GooglePollenApiClient(fake_session, "bad") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="bad", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + with pytest.raises(client_mod.ConfigEntryAuthFailed): loop.run_until_complete(coordinator._async_update_data()) finally: loop.close() @@ -698,8 +1453,16 @@ def test_coordinator_retries_then_raises_on_rate_limit( session = SequenceSession( [ - ResponseSpec(status=429, payload={}, headers={"Retry-After": "3"}), - ResponseSpec(status=429, payload={}, headers={"Retry-After": "3"}), + ResponseSpec( + status=429, + payload={"error": {"message": "Quota exceeded"}}, + headers={"Retry-After": "3"}, + ), + ResponseSpec( + status=429, + payload={"error": {"message": "Quota exceeded"}}, + headers={"Retry-After": "3"}, + ), ] ) delays: list[float] = [] @@ -707,13 +1470,14 @@ def test_coordinator_retries_then_raises_on_rate_limit( async def _fast_sleep(delay: float) -> None: delays.append(delay) - monkeypatch.setattr(sensor.asyncio, "sleep", _fast_sleep) - monkeypatch.setattr(sensor.random, "uniform", lambda *_args, **_kwargs: 0.0) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: session) + monkeypatch.setattr(client_mod.asyncio, "sleep", _fast_sleep) + monkeypatch.setattr(client_mod.random, "uniform", lambda *_args, **_kwargs: 0.0) + + client = 
client_mod.GooglePollenApiClient(session, "test") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="test", lat=1.0, @@ -724,10 +1488,11 @@ async def _fast_sleep(delay: float) -> None: forecast_days=1, create_d1=False, create_d2=False, + client=client, ) try: - with pytest.raises(sensor.UpdateFailed, match="Quota exceeded"): + with pytest.raises(client_mod.UpdateFailed, match="Quota exceeded"): loop.run_until_complete(coordinator._async_update_data()) finally: loop.close() @@ -736,6 +1501,140 @@ async def _fast_sleep(delay: float) -> None: assert delays == [3.0] +def test_coordinator_retry_after_http_date(monkeypatch: pytest.MonkeyPatch) -> None: + """Retry-After as HTTP-date is converted to a delay before retry.""" + + retry_after = "Wed, 10 Dec 2025 12:00:05 GMT" + session = SequenceSession( + [ + ResponseSpec( + status=429, + payload={"error": {"message": "Quota exceeded"}}, + headers={"Retry-After": retry_after}, + ), + ResponseSpec( + status=429, + payload={"error": {"message": "Quota exceeded"}}, + headers={"Retry-After": retry_after}, + ), + ] + ) + delays: list[float] = [] + + async def _fast_sleep(delay: float) -> None: + delays.append(delay) + + monkeypatch.setattr(client_mod.asyncio, "sleep", _fast_sleep) + monkeypatch.setattr(client_mod.random, "uniform", lambda *_args, **_kwargs: 0.0) + monkeypatch.setattr( + client_mod.dt_util, + "utcnow", + lambda: datetime.datetime(2025, 12, 10, 12, 0, 0, tzinfo=datetime.UTC), + ) + + client = client_mod.GooglePollenApiClient(session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + with 
pytest.raises(client_mod.UpdateFailed, match="Quota exceeded"): + loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert session.calls == 2 + assert delays == [5.0] + + +@pytest.mark.parametrize( + ("retry_after", "now"), + [ + ("-10", None), + ("nan", None), + ("inf", None), + ( + "Wed, 10 Dec 2025 12:00:00 GMT", + datetime.datetime(2025, 12, 10, 12, 0, 5, tzinfo=datetime.UTC), + ), + ], +) +def test_coordinator_retry_after_invalid_values_use_safe_default( + monkeypatch: pytest.MonkeyPatch, + retry_after: str, + now: datetime.datetime | None, +) -> None: + """Invalid Retry-After values should fall back to a safe finite delay.""" + + session = SequenceSession( + [ + ResponseSpec( + status=429, + payload={"error": {"message": "Quota exceeded"}}, + headers={"Retry-After": retry_after}, + ), + ResponseSpec( + status=429, + payload={"error": {"message": "Quota exceeded"}}, + headers={"Retry-After": retry_after}, + ), + ] + ) + delays: list[float] = [] + + async def _fast_sleep(delay: float) -> None: + assert isinstance(delay, float) + assert delay == delay + assert delay != float("inf") + assert delay != float("-inf") + delays.append(delay) + + monkeypatch.setattr(client_mod.asyncio, "sleep", _fast_sleep) + monkeypatch.setattr(client_mod.random, "uniform", lambda *_args, **_kwargs: 0.0) + if now is not None: + monkeypatch.setattr(client_mod.dt_util, "utcnow", lambda: now) + + client = client_mod.GooglePollenApiClient(session, "test") + + loop = asyncio.new_event_loop() + hass = DummyHass(loop) + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="test", + lat=1.0, + lon=2.0, + hours=12, + language=None, + entry_id="entry", + forecast_days=1, + create_d1=False, + create_d2=False, + client=client, + ) + + try: + with pytest.raises(client_mod.UpdateFailed, match="Quota exceeded"): + loop.run_until_complete(coordinator._async_update_data()) + finally: + loop.close() + + assert session.calls == 2 + 
assert delays == [2.0] + + def test_coordinator_retries_then_raises_on_server_errors( monkeypatch: pytest.MonkeyPatch, ) -> None: @@ -749,13 +1648,14 @@ def test_coordinator_retries_then_raises_on_server_errors( async def _fast_sleep(delay: float) -> None: delays.append(delay) - monkeypatch.setattr(sensor.asyncio, "sleep", _fast_sleep) - monkeypatch.setattr(sensor.random, "uniform", lambda *_args, **_kwargs: 0.0) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: session) + monkeypatch.setattr(client_mod.asyncio, "sleep", _fast_sleep) + monkeypatch.setattr(client_mod.random, "uniform", lambda *_args, **_kwargs: 0.0) + + client = client_mod.GooglePollenApiClient(session, "test") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="test", lat=1.0, @@ -766,10 +1666,11 @@ async def _fast_sleep(delay: float) -> None: forecast_days=1, create_d1=False, create_d2=False, + client=client, ) try: - with pytest.raises(sensor.UpdateFailed, match="HTTP 502"): + with pytest.raises(client_mod.UpdateFailed, match="HTTP 502"): loop.run_until_complete(coordinator._async_update_data()) finally: loop.close() @@ -789,13 +1690,14 @@ def test_coordinator_retries_then_wraps_timeout( async def _fast_sleep(delay: float) -> None: delays.append(delay) - monkeypatch.setattr(sensor.asyncio, "sleep", _fast_sleep) - monkeypatch.setattr(sensor.random, "uniform", lambda *_args, **_kwargs: 0.0) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: session) + monkeypatch.setattr(client_mod.asyncio, "sleep", _fast_sleep) + monkeypatch.setattr(client_mod.random, "uniform", lambda *_args, **_kwargs: 0.0) + + client = client_mod.GooglePollenApiClient(session, "test") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( 
hass=hass, api_key="test", lat=1.0, @@ -806,10 +1708,11 @@ async def _fast_sleep(delay: float) -> None: forecast_days=1, create_d1=False, create_d2=False, + client=client, ) try: - with pytest.raises(sensor.UpdateFailed, match="Timeout"): + with pytest.raises(client_mod.UpdateFailed, match="Timeout"): loop.run_until_complete(coordinator._async_update_data()) finally: loop.close() @@ -824,20 +1727,24 @@ def test_coordinator_retries_then_wraps_client_error( """Client errors retry once then raise UpdateFailed with redacted message.""" session = SequenceSession( - [sensor.aiohttp.ClientError("net down"), sensor.aiohttp.ClientError("net down")] + [ + client_mod.ClientError("net down"), + client_mod.ClientError("net down"), + ] ) delays: list[float] = [] async def _fast_sleep(delay: float) -> None: delays.append(delay) - monkeypatch.setattr(sensor.asyncio, "sleep", _fast_sleep) - monkeypatch.setattr(sensor.random, "uniform", lambda *_args, **_kwargs: 0.0) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: session) + monkeypatch.setattr(client_mod.asyncio, "sleep", _fast_sleep) + monkeypatch.setattr(client_mod.random, "uniform", lambda *_args, **_kwargs: 0.0) + + client = client_mod.GooglePollenApiClient(session, "secret") loop = asyncio.new_event_loop() hass = DummyHass(loop) - coordinator = sensor.PollenDataUpdateCoordinator( + coordinator = coordinator_mod.PollenDataUpdateCoordinator( hass=hass, api_key="secret", lat=1.0, @@ -848,10 +1755,11 @@ async def _fast_sleep(delay: float) -> None: forecast_days=1, create_d1=False, create_d2=False, + client=client, ) try: - with pytest.raises(sensor.UpdateFailed, match="net down"): + with pytest.raises(client_mod.UpdateFailed, match="net down"): loop.run_until_complete(coordinator._async_update_data()) finally: loop.close() @@ -860,8 +1768,8 @@ async def _fast_sleep(delay: float) -> None: assert delays == [0.8] -def test_async_setup_entry_missing_api_key_triggers_reauth() -> None: - """A missing API key 
results in ConfigEntryAuthFailed during setup.""" +def test_async_setup_entry_raises_not_ready_if_runtime_data_missing() -> None: + """Missing runtime data causes setup to raise ConfigEntryNotReady.""" loop = asyncio.new_event_loop() hass = DummyHass(loop) @@ -878,7 +1786,7 @@ async def _noop_add_entities(_entities, _update_before_add=False): return None try: - with pytest.raises(sensor.ConfigEntryAuthFailed): + with pytest.raises(sensor.ConfigEntryNotReady): loop.run_until_complete( sensor.async_setup_entry(hass, config_entry, _noop_add_entities) ) @@ -886,22 +1794,73 @@ async def _noop_add_entities(_entities, _update_before_add=False): loop.close() +@pytest.mark.asyncio +async def test_async_setup_entry_skips_disabled_d1_d2_sensors() -> None: + """Setup does not recreate D+1/D+2 sensors when forecast days disable them.""" + + hass = DummyHass(asyncio.get_running_loop()) + config_entry = FakeConfigEntry( + data={ + sensor.CONF_API_KEY: "key", + sensor.CONF_LATITUDE: 1.0, + sensor.CONF_LONGITUDE: 2.0, + sensor.CONF_UPDATE_INTERVAL: sensor.DEFAULT_UPDATE_INTERVAL, + sensor.CONF_FORECAST_DAYS: sensor.DEFAULT_FORECAST_DAYS, + }, + options={sensor.CONF_FORECAST_DAYS: 1}, + entry_id="entry", + ) + + client = client_mod.GooglePollenApiClient(FakeSession({}), "key") + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="key", + lat=1.0, + lon=2.0, + hours=sensor.DEFAULT_UPDATE_INTERVAL, + language=None, + entry_id="entry", + entry_title=sensor.DEFAULT_ENTRY_TITLE, + forecast_days=3, + create_d1=True, + create_d2=True, + client=client, + ) + coordinator.data = { + "date": {"source": "meta"}, + "region": {"source": "meta"}, + "type_grass": {"source": "type", "name": "Grass"}, + "type_grass_d1": {"source": "type", "name": "Grass D+1"}, + "type_grass_d2": {"source": "type", "name": "Grass D+2"}, + } + config_entry.runtime_data = sensor.PollenLevelsRuntimeData( + coordinator=coordinator, client=client + ) + + captured: list[Any] = [] + + def 
_capture_entities(entities, _update_before_add=False): + captured.extend(entities) + + await sensor.async_setup_entry(hass, config_entry, _capture_entities) + + unique_ids = { + entity.unique_id + for entity in captured + if getattr(entity, "unique_id", None) is not None + } + + assert "entry_type_grass" in unique_ids + assert all(not uid.endswith("_d1") for uid in unique_ids) + assert all(not uid.endswith("_d2") for uid in unique_ids) + + @pytest.mark.asyncio async def test_device_info_uses_default_title_when_blank( monkeypatch: pytest.MonkeyPatch, ) -> None: """Whitespace titles fall back to the default in translation placeholders.""" - async def _stub_first_refresh(self): # type: ignore[override] - self.data = {"date": {"source": "meta"}, "region": {"source": "meta"}} - - monkeypatch.setattr( - sensor.PollenDataUpdateCoordinator, - "async_config_entry_first_refresh", - _stub_first_refresh, - ) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: None) - hass = DummyHass(asyncio.get_running_loop()) config_entry = FakeConfigEntry( data={ @@ -915,6 +1874,27 @@ async def _stub_first_refresh(self): # type: ignore[override] ) config_entry.title = " " + client = client_mod.GooglePollenApiClient(FakeSession({}), "key") + clean_title = sensor.DEFAULT_ENTRY_TITLE + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="key", + lat=1.0, + lon=2.0, + hours=sensor.DEFAULT_UPDATE_INTERVAL, + language=None, + entry_id="entry", + entry_title=clean_title, + forecast_days=sensor.DEFAULT_FORECAST_DAYS, + create_d1=False, + create_d2=False, + client=client, + ) + coordinator.data = {"date": {"source": "meta"}, "region": {"source": "meta"}} + config_entry.runtime_data = sensor.PollenLevelsRuntimeData( + coordinator=coordinator, client=client + ) + captured: list[Any] = [] def _capture_entities(entities, _update_before_add=False): @@ -936,16 +1916,6 @@ async def test_device_info_trims_custom_title( ) -> None: """Custom titles are trimmed 
before reaching translation placeholders.""" - async def _stub_first_refresh(self): # type: ignore[override] - self.data = {"date": {"source": "meta"}, "region": {"source": "meta"}} - - monkeypatch.setattr( - sensor.PollenDataUpdateCoordinator, - "async_config_entry_first_refresh", - _stub_first_refresh, - ) - monkeypatch.setattr(sensor, "async_get_clientsession", lambda _hass: None) - hass = DummyHass(asyncio.get_running_loop()) config_entry = FakeConfigEntry( data={ @@ -959,6 +1929,27 @@ async def _stub_first_refresh(self): # type: ignore[override] ) config_entry.title = " My Location " + client = client_mod.GooglePollenApiClient(FakeSession({}), "key") + clean_title = config_entry.title.strip() + coordinator = coordinator_mod.PollenDataUpdateCoordinator( + hass=hass, + api_key="key", + lat=1.0, + lon=2.0, + hours=sensor.DEFAULT_UPDATE_INTERVAL, + language=None, + entry_id="entry", + entry_title=clean_title, + forecast_days=sensor.DEFAULT_FORECAST_DAYS, + create_d1=False, + create_d2=False, + client=client, + ) + coordinator.data = {"date": {"source": "meta"}, "region": {"source": "meta"}} + config_entry.runtime_data = sensor.PollenLevelsRuntimeData( + coordinator=coordinator, client=client + ) + captured: list[Any] = [] def _capture_entities(entities, _update_before_add=False): diff --git a/tests/test_translations.py b/tests/test_translations.py index 3df0661e..1b1c944a 100644 --- a/tests/test_translations.py +++ b/tests/test_translations.py @@ -1,15 +1,21 @@ """Translation coverage tests for the Pollen Levels integration. -These tests parse ``config_flow.py`` with a simple AST walker to ensure every -translation key used in the config/options flows exists in each locale file. -If the flow code changes structure, update the helper below rather than -changing the assertions to keep the guarantees intact. +These tests ensure: +- All locale files have the exact same keyset as en.json (en.json is the source of truth). 
+- Translation keys referenced by config_flow.py (config + options flows) exist in en.json. +- Translation keys referenced by sensor.py via entity/device translation_key exist in en.json. +- Translation keys for sections (step.*.sections.*) are present if schema uses ``section(...)``. +- Translation keys for services declared in services.yaml exist in en.json. + +The config_flow extraction uses an AST walker. If config_flow.py changes structure in +unexpected ways, the helpers should fail loudly so we don't silently lose coverage. """ from __future__ import annotations import ast import json +import re from pathlib import Path from typing import Any @@ -23,6 +29,8 @@ TRANSLATIONS_DIR = COMPONENT_DIR / "translations" CONFIG_FLOW_PATH = COMPONENT_DIR / "config_flow.py" CONST_PATH = COMPONENT_DIR / "const.py" +SENSOR_PATH = COMPONENT_DIR / "sensor.py" +SERVICES_YAML_PATH = COMPONENT_DIR / "services.yaml" def _fail_unexpected_ast(context: str) -> None: @@ -56,6 +64,152 @@ def _load_translation(path: Path) -> dict[str, Any]: return json.load(file) +def _extract_services_from_services_yaml() -> set[str]: + """Extract top-level service names from services.yaml without requiring PyYAML.""" + + if not SERVICES_YAML_PATH.is_file(): + return set() + + services: set[str] = set() + for line in SERVICES_YAML_PATH.read_text(encoding="utf-8").splitlines(): + raw = line.rstrip("\n") + if not raw or raw.lstrip().startswith("#"): + continue + if raw.startswith(" "): + continue + match = re.match(r"^([a-zA-Z0-9_]+):\s*$", raw) + if match: + services.add(match.group(1)) + + return services + + +def _extract_service_labels_from_services_yaml() -> dict[str, dict[str, str]]: + """Extract service name/description values from services.yaml without PyYAML.""" + + if not SERVICES_YAML_PATH.is_file(): + return {} + + services: dict[str, dict[str, str]] = {} + current: str | None = None + for line in SERVICES_YAML_PATH.read_text(encoding="utf-8").splitlines(): + raw = line.rstrip("\n") + if not 
raw or raw.lstrip().startswith("#"): + continue + if not raw.startswith(" "): + match = re.match(r"^([a-zA-Z0-9_]+):\s*$", raw) + current = match.group(1) if match else None + if current is not None: + services.setdefault(current, {}) + continue + if current is None: + continue + match = re.match(r"^\s+(name|description):\s*(.+)\s*$", raw) + if match: + key, value = match.groups() + services[current][key] = value.strip().strip('"').strip("'") + + return services + + +def _extract_sensor_translation_key_usage() -> tuple[set[str], set[str]]: + """Extract translation keys referenced by sensor entities and devices. + + Entity keys: + - _attr_translation_key = "" -> entity.sensor..name + + Device keys: + - "translation_key": "" in a device_info dict literal + - values in a mapping like: translation_keys = {"type": "types", ...} + - default used in translation_keys.get(..., "") + + This stays intentionally narrow; unsupported AST changes should fail loudly. + """ + + if not SENSOR_PATH.is_file(): + raise AssertionError(f"Missing sensor.py at {SENSOR_PATH}") + + tree = ast.parse(SENSOR_PATH.read_text(encoding="utf-8")) + + entity_keys: set[str] = set() + device_keys: set[str] = set() + + # 1) Entity translation keys: _attr_translation_key = "" + for node in ast.walk(tree): + if not isinstance(node, (ast.Assign, ast.AnnAssign)): + continue + + if isinstance(node, ast.Assign): + if len(node.targets) != 1: + continue + target = node.targets[0] + value = node.value + else: + target = node.target + value = node.value + + if ( + isinstance(target, ast.Name) + and target.id == "_attr_translation_key" + and isinstance(value, ast.Constant) + and isinstance(value.value, str) + ): + entity_keys.add(value.value) + + # 2) Device translation keys from explicit dict literals: {"translation_key": ""} + for node in ast.walk(tree): + if not isinstance(node, ast.Dict): + continue + for k, v in zip(node.keys, node.values, strict=False): + if ( + isinstance(k, ast.Constant) + and k.value == 
"translation_key" + and isinstance(v, ast.Constant) + and isinstance(v.value, str) + ): + device_keys.add(v.value) + + # 3) Device translation keys from a mapping: translation_keys = {...} + for node in ast.walk(tree): + if not isinstance(node, ast.Assign) or len(node.targets) != 1: + continue + if not ( + isinstance(node.targets[0], ast.Name) + and node.targets[0].id == "translation_keys" + ): + continue + if not isinstance(node.value, ast.Dict): + _fail_unexpected_ast("sensor.py translation_keys assignment is not a dict") + for v in node.value.values: + if not (isinstance(v, ast.Constant) and isinstance(v.value, str)): + _fail_unexpected_ast( + "sensor.py translation_keys dict contains non-string values" + ) + device_keys.add(v.value) + + # 4) Default device translation key: translation_keys.get(..., "") + for node in ast.walk(tree): + if not isinstance(node, ast.Call): + continue + if not (isinstance(node.func, ast.Attribute) and node.func.attr == "get"): + continue + if not ( + isinstance(node.func.value, ast.Name) + and node.func.value.id == "translation_keys" + ): + continue + if len(node.args) >= 2: + default = node.args[1] + if isinstance(default, ast.Constant) and isinstance(default.value, str): + device_keys.add(default.value) + else: + _fail_unexpected_ast( + "sensor.py translation_keys.get default is not a string literal" + ) + + return entity_keys, device_keys + + def test_translations_match_english_keyset() -> None: """Verify all locale files mirror the English translation keyset.""" @@ -71,9 +225,9 @@ def test_translations_match_english_keyset() -> None: extra = locale_keys - english_keys if missing or extra: problems.append( - f"{translation_path.name}: " - f"missing {sorted(missing)} extra {sorted(extra)}" + f"{translation_path.name}: missing {sorted(missing)} extra {sorted(extra)}" ) + assert not problems, "Translation keys mismatch: " + "; ".join(problems) @@ -86,6 +240,83 @@ def test_config_flow_translation_keys_present() -> None: assert not 
missing, f"Missing config_flow translation keys: {sorted(missing)}" +def test_config_flow_extractor_includes_helper_error_keys() -> None: + """Regression: helper-propagated errors must be detected by AST extraction.""" + + keys = _extract_config_flow_keys() + assert "config.error.invalid_update_interval" in keys + assert "options.error.invalid_update_interval" in keys + assert "config.error.invalid_forecast_days" in keys + assert "options.error.invalid_forecast_days" in keys + + +def test_sensor_translation_keys_present() -> None: + """Ensure entity/device translation keys referenced by sensor.py exist in en.json.""" + + english = _flatten_keys(_load_translation(TRANSLATIONS_DIR / "en.json")) + entity_keys, device_keys = _extract_sensor_translation_key_usage() + + assert entity_keys, "No _attr_translation_key values found in sensor.py" + assert device_keys, "No device translation_key values found in sensor.py" + + missing: list[str] = [] + for key in sorted(entity_keys): + tkey = f"entity.sensor.{key}.name" + if tkey not in english: + missing.append(tkey) + + for key in sorted(device_keys): + tkey = f"device.{key}.name" + if tkey not in english: + missing.append(tkey) + + assert ( + not missing + ), "Missing sensor/device translation keys in en.json: " + ", ".join(missing) + + +def test_services_translation_keys_present() -> None: + """Ensure services declared in services.yaml have translations in en.json.""" + + english = _flatten_keys(_load_translation(TRANSLATIONS_DIR / "en.json")) + service_names = _extract_services_from_services_yaml() + if not service_names: + return + + missing: list[str] = [] + for service in sorted(service_names): + for suffix in ("name", "description"): + key = f"services.{service}.{suffix}" + if key not in english: + missing.append(key) + + assert not missing, "Missing service translation keys in en.json: " + ", ".join( + missing + ) + + +def test_services_yaml_labels_match_translations() -> None: + """Ensure services.yaml labels match 
en.json translations.""" + + services = _extract_service_labels_from_services_yaml() + if not services: + return + + en_data = _load_translation(TRANSLATIONS_DIR / "en.json") + en_services = en_data.get("services", {}) + + for service_name, labels in services.items(): + translations = en_services.get(service_name, {}) + for key in ("name", "description"): + value = labels.get(key) + if value is None: + continue + expected = translations.get(key) + assert ( + value == expected + ), f"Service {service_name} {key} mismatch: {value!r} != {expected!r}" + + def _extract_constant_assignments(tree: ast.AST) -> dict[str, str]: """Collect string literal assignments from an AST. @@ -116,56 +347,168 @@ def _extract_constant_assignments(tree: ast.AST) -> dict[str, str]: return constants +def _extract_schema_key_aliases( + tree: ast.AST, mapping: dict[str, str] +) -> dict[str, str]: + """Extract schema key wrapper aliases like location_field = vol.Required(CONF_LOCATION).""" + + aliases: dict[str, str] = {} + for node in ast.walk(tree): + target: ast.AST | None = None + if isinstance(node, ast.Assign): + if len(node.targets) != 1: + continue + target = node.targets[0] + elif isinstance(node, ast.AnnAssign): + target = node.target + + if not isinstance(target, ast.Name): + continue + + value = node.value if hasattr(node, "value") else None + if not isinstance(value, ast.Call): + continue + + if not ( + isinstance(value.func, ast.Attribute) + and value.func.attr in {"Required", "Optional"} + ): + continue + + if not value.args: + continue + + selector = value.args[0] + if isinstance(selector, ast.Constant) and isinstance(selector.value, str): + aliases[target.id] = selector.value + elif isinstance(selector, ast.Name): + resolved = _resolve_name(selector.id, mapping) + if resolved: + aliases[target.id] = resolved + + return aliases + + def _resolve_name(name: str, mapping: dict[str, str]) -> str | None: """Resolve a variable name to its string value if known.""" return 
mapping.get(name) -def _fields_from_schema_call(call: ast.Call, mapping: dict[str, str]) -> set[str]: - """Extract field keys from a vol.Schema(...) call. +def _fields_from_section_value( + value: ast.AST, mapping: dict[str, str] +) -> tuple[set[str], set[str]]: + """Extract fields and nested section IDs from a section(...) value.""" - Looks for patterns like: - vol.Schema({vol.Required(CONF_USERNAME): str, ...}) - """ + if isinstance(value, ast.Dict): + return _fields_from_schema_dict(value, mapping) + if isinstance(value, ast.Call): + if isinstance(value.func, ast.Attribute) and value.func.attr == "Schema": + return _fields_from_schema_call(value, mapping) + _fail_unexpected_ast("unexpected section value AST") + return set(), set() - if not call.args or not isinstance(call.args[0], ast.Dict): - _fail_unexpected_ast("schema call arguments") - arg = call.args[0] + +def _fields_from_schema_dict( + schema_dict: ast.Dict, mapping: dict[str, str] +) -> tuple[set[str], set[str]]: + """Extract field keys and section IDs from an AST dict representing a schema.""" fields: set[str] = set() - for key in arg.keys: - if not isinstance(key, ast.Call) or not isinstance(key.func, ast.Attribute): - _fail_unexpected_ast("schema key wrapper") - if key.func.attr not in {"Required", "Optional"}: - _fail_unexpected_ast(f"unexpected schema call {key.func.attr}") - if not key.args: - _fail_unexpected_ast("schema key args") - selector = key.args[0] - if isinstance(selector, ast.Constant) and isinstance(selector.value, str): - fields.add(selector.value) - elif isinstance(selector, ast.Name): - resolved = _resolve_name(selector.id, mapping) + sections: set[str] = set() + for key_node, value_node in zip(schema_dict.keys, schema_dict.values, strict=False): + if isinstance(key_node, ast.Name): + resolved = _resolve_name(key_node.id, mapping) if resolved: fields.add(resolved) + continue + _fail_unexpected_ast(f"unmapped schema key {key_node.id}") + if not isinstance(key_node, ast.Call): + 
_fail_unexpected_ast("schema key wrapper") + + if isinstance(key_node.func, ast.Attribute) and key_node.func.attr in { + "Required", + "Optional", + }: + if not key_node.args: + _fail_unexpected_ast("schema key args") + selector = key_node.args[0] + selector_value: str | None = None + if isinstance(selector, ast.Constant) and isinstance(selector.value, str): + selector_value = selector.value + elif isinstance(selector, ast.Name): + selector_value = _resolve_name(selector.id, mapping) + if selector_value is None: + _fail_unexpected_ast(f"unmapped selector {selector.id}") else: - _fail_unexpected_ast(f"unmapped selector {selector.id}") - else: - _fail_unexpected_ast("selector type") - return fields + _fail_unexpected_ast("selector type") + + if ( + isinstance(value_node, ast.Call) + and isinstance(value_node.func, ast.Name) + and value_node.func.id == "section" + ): + if selector_value is None: + _fail_unexpected_ast("section selector missing") + sections.add(selector_value) + if not value_node.args: + _fail_unexpected_ast("section() missing schema value") + section_fields, nested_sections = _fields_from_section_value( + value_node.args[0], mapping + ) + fields.update(section_fields) + sections.update(nested_sections) + continue + + if selector_value is not None: + fields.add(selector_value) + continue + + if isinstance(key_node.func, ast.Name) and key_node.func.id == "section": + if not key_node.args: + _fail_unexpected_ast("section() missing section id") + section_id_node = key_node.args[0] + if isinstance(section_id_node, ast.Constant) and isinstance( + section_id_node.value, str + ): + sections.add(section_id_node.value) + elif isinstance(section_id_node, ast.Name): + resolved = _resolve_name(section_id_node.id, mapping) + if resolved: + sections.add(resolved) + else: + _fail_unexpected_ast(f"unmapped section id {section_id_node.id}") + else: + _fail_unexpected_ast("section id type") + + section_fields, nested_sections = _fields_from_section_value( + value_node, 
mapping + ) + fields.update(section_fields) + sections.update(nested_sections) + continue + + _fail_unexpected_ast("unexpected schema call wrapper") + return fields, sections + + +def _fields_from_schema_call( + call: ast.Call, mapping: dict[str, str] +) -> tuple[set[str], set[str]]: + """Extract field keys and section IDs from a vol.Schema(...) call.""" + + if not call.args or not isinstance(call.args[0], ast.Dict): + _fail_unexpected_ast("schema call arguments") + + return _fields_from_schema_dict(call.args[0], mapping) def _extract_schema_fields( tree: ast.AST, mapping: dict[str, str] -) -> dict[str, set[str]]: - """Map schema helper names to their field keys. - - Collects: - - Functions like _user_schema / _options_schema returning vol.Schema(...) - - Top-level assignments like USER_SCHEMA = vol.Schema(...) - """ +) -> dict[str, tuple[set[str], set[str]]]: + """Map schema helper names to (fields, sections).""" - fields: dict[str, set[str]] = {} + schemas: dict[str, tuple[set[str], set[str]]] = {} for node in ast.walk(tree): if isinstance(node, ast.FunctionDef) and node.name in { "_user_schema", @@ -176,9 +519,13 @@ def _extract_schema_fields( ] for ret in returns: if isinstance(ret.value, ast.Call): - fields.setdefault(node.name, set()).update( - _fields_from_schema_call(ret.value, mapping) + fields, sections = _fields_from_schema_call(ret.value, mapping) + prev_fields, prev_sections = schemas.get(node.name, (set(), set())) + schemas[node.name] = ( + prev_fields | fields, + prev_sections | sections, ) + if isinstance(node, ast.Assign): if ( isinstance(node.targets[0], ast.Name) @@ -187,10 +534,43 @@ def _extract_schema_fields( and node.value.func.attr == "Schema" ): name = node.targets[0].id - fields.setdefault(name, set()).update( - _fields_from_schema_call(node.value, mapping) + fields, sections = _fields_from_schema_call(node.value, mapping) + prev_fields, prev_sections = schemas.get(name, (set(), set())) + schemas[name] = ( + prev_fields | fields, + 
prev_sections | sections, ) - return fields + return schemas + + +def _extract_helper_error_keys(tree: ast.AST) -> dict[str, set[str]]: + """Discover module-level helper functions that emit error keys via _parse_int_option(..., error_key=...).""" + + helpers: dict[str, set[str]] = {} + for node in getattr(tree, "body", []): + if not isinstance(node, ast.FunctionDef): + continue + + emitted: set[str] = set() + for call in ast.walk(node): + if not (isinstance(call, ast.Call) and isinstance(call.func, ast.Name)): + continue + if call.func.id != "_parse_int_option": + continue + for kw in call.keywords: + if kw.arg != "error_key": + continue + if isinstance(kw.value, ast.Constant) and isinstance( + kw.value.value, str + ): + emitted.add(kw.value.value) + else: + _fail_unexpected_ast( + f"error_key in {node.name} is not a string literal" + ) + if emitted: + helpers[node.name] = emitted + return helpers def _is_options_flow_class(name: str) -> bool: @@ -213,6 +593,7 @@ def _extract_config_flow_keys() -> set[str]: - config.step..title - config.step..description - config.step..data. + - config.step..sections. - config.error. - config.abort. And the equivalent options.* keys for options flows. @@ -232,6 +613,8 @@ def _extract_config_flow_keys() -> set[str]: "CONF_API_KEY": "api_key", "CONF_LATITUDE": "latitude", "CONF_LONGITUDE": "longitude", + "CONF_LOCATION": "location", + "CONF_NAME": "name", "CONF_LANGUAGE": "language", "CONF_SCAN_INTERVAL": "scan_interval", } @@ -240,8 +623,12 @@ def _extract_config_flow_keys() -> set[str]: if const_tree is not None: mapping.update(_extract_constant_assignments(const_tree)) mapping.update(_extract_constant_assignments(config_tree)) + mapping.update(_extract_schema_key_aliases(config_tree, mapping)) + + schema_info = _extract_schema_fields(config_tree, mapping) - schema_fields = _extract_schema_fields(config_tree, mapping) + # Helper functions can return error keys indirectly (e.g., interval_error/days_error). 
+ helper_error_keys = _extract_helper_error_keys(config_tree) language_error_returns: set[str] = set() @@ -271,6 +658,15 @@ def _extract_error_values(value: ast.AST) -> set[str]: values.update(language_error_returns) return values + def _extract_error_key_kw(call: ast.Call) -> str | None: + for kw in call.keywords: + if kw.arg != "error_key": + continue + if isinstance(kw.value, ast.Constant) and isinstance(kw.value.value, str): + return kw.value.value + _fail_unexpected_ast("error_key kwarg is not a string literal") + return None + class _ScopedErrorsVisitor(ast.NodeVisitor): def __init__(self) -> None: self.class_stack: list[str | None] = [] @@ -290,6 +686,26 @@ def visit_AnnAssign(self, node: ast.AnnAssign) -> None: # noqa: N802 self._record_errors(node.target, node.value) self.generic_visit(node) + def visit_Call(self, node: ast.Call) -> None: # noqa: N802 + # Collect helper-propagated errors used in a class scope, e.g.: + # interval_value, interval_error = _parse_update_interval(...); errors[...] 
= interval_error + class_name = self.class_stack[-1] if self.class_stack else None + if class_name is None: + self.generic_visit(node) + return + + if isinstance(node.func, ast.Name): + if node.func.id == "_parse_int_option": + err = _extract_error_key_kw(node) + if err: + self.by_class.setdefault(class_name, set()).add(err) + elif node.func.id in helper_error_keys: + self.by_class.setdefault(class_name, set()).update( + helper_error_keys[node.func.id] + ) + + self.generic_visit(node) + def _record_errors(self, target: ast.AST, value: ast.AST | None) -> None: if ( isinstance(target, ast.Subscript) @@ -310,7 +726,9 @@ def _record_errors(self, target: ast.AST, value: ast.AST | None) -> None: class FlowVisitor(ast.NodeVisitor): def __init__(self) -> None: self.class_stack: list[str] = [] - self.local_schema_vars: dict[str, set[str]] = dict(schema_fields) + self.local_schema_vars: dict[str, tuple[set[str], set[str]]] = dict( + schema_info + ) def visit_ClassDef(self, node: ast.ClassDef) -> None: # noqa: N802 self.class_stack.append(node.name) @@ -350,6 +768,7 @@ def _handle_show_form(self, node: ast.Call, prefix: str) -> None: step_id: str | None = None schema_name: str | None = None inline_schema_fields: set[str] = set() + inline_sections: set[str] = set() for kw in node.keywords: if kw.arg == "step_id" and isinstance(kw.value, ast.Constant): @@ -362,9 +781,11 @@ def _handle_show_form(self, node: ast.Call, prefix: str) -> None: isinstance(kw.value.func, ast.Attribute) and kw.value.func.attr == "Schema" ): - inline_schema_fields.update( - _fields_from_schema_call(kw.value, mapping) + fields, sections = _fields_from_schema_call( + kw.value, mapping ) + inline_schema_fields.update(fields) + inline_sections.update(sections) if kw.arg == "errors": if isinstance(kw.value, ast.Dict): for err_value in kw.value.values: @@ -398,11 +819,16 @@ def _handle_show_form(self, node: ast.Call, prefix: str) -> None: keys.add(f"{prefix}.step.{step_id}.description") if schema_name and 
schema_name in self.local_schema_vars: - for field in self.local_schema_vars[schema_name]: + fields, sections = self.local_schema_vars[schema_name] + for field in fields: keys.add(f"{prefix}.step.{step_id}.data.{field}") + for section_id in sections: + keys.add(f"{prefix}.step.{step_id}.sections.{section_id}") for field in inline_schema_fields: keys.add(f"{prefix}.step.{step_id}.data.{field}") + for section_id in inline_sections: + keys.add(f"{prefix}.step.{step_id}.sections.{section_id}") def _handle_abort(self, node: ast.Call, prefix: str) -> None: for kw in node.keywords: diff --git a/tests/test_util.py b/tests/test_util.py index 17184d1e..e1415886 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -1,6 +1,8 @@ """Tests for shared utilities.""" -from custom_components.pollenlevels.util import redact_api_key +import pytest + +from custom_components.pollenlevels.util import redact_api_key, safe_parse_int def test_redact_api_key_handles_non_utf8_bytes(): @@ -21,3 +23,25 @@ def test_redact_api_key_returns_empty_string_for_none(): """None inputs should yield an empty string.""" assert redact_api_key(None, "anything") == "" + + +@pytest.mark.parametrize( + ("value", "expected"), + [ + (3, 3), + (3.0, 3), + ("3", 3), + ("3.0", 3), + (True, None), + (False, None), + (None, None), + ("3.5", None), + (3.5, None), + ("nan", None), + ("inf", None), + ], +) +def test_safe_parse_int(value, expected): + """safe_parse_int accepts integer-like values and rejects invalid input.""" + + assert safe_parse_int(value) == expected