diff --git a/TODO.md b/TODO.md index f7b31347..ede33cff 100644 --- a/TODO.md +++ b/TODO.md @@ -10,6 +10,41 @@ - Fix existing type errors (~30 errors as of Jan 2026) - Test visual editor for both cards. +## High Priority Investigation + +### Fix Locks Out of Sync Issue + +**Status:** ✅ **RESOLVED** (January 2026) + +**Solution Implemented:** Moved sync logic ownership from binary sensor to coordinator. + +**Key Changes:** + +1. **Coordinator now owns sync operations** (`coordinator.py`): + - `async_request_sync()` - executes set/clear operations with automatic retries + - `mark_synced()` / `mark_out_of_sync()` - manages per-slot sync state + - `get_sync_state()` - returns sync status for binary sensor to display + - `_pending_retries` - tracks per-slot retry callbacks (infinite retries until success) + +2. **Binary sensor simplified** (`binary_sensor.py`): + - Now read-only: displays sync state from coordinator via `is_on` property + - Detects out-of-sync conditions and requests sync via coordinator + - No longer owns retry logic or performs sync operations directly + +3. **Retry behavior:** + - Infinite retries every 10 seconds until success + - New sync requests replace pending retries (latest state always wins) + - E.g., if "set" fails and the user then disables the slot, the pending retry is cancelled and + replaced with a "clear" operation + +**Files Modified:** + +- `custom_components/lock_code_manager/coordinator.py` - sync operation methods +- `custom_components/lock_code_manager/binary_sensor.py` - simplified to read-only +- `custom_components/lock_code_manager/__init__.py` - split `async_update_listener` into helper functions +- `tests/test_coordinator.py` - 14 new tests for sync operations +- `tests/test_binary_sensor.py` - updated tests for new architecture + ## Testing - Strategy UI module has unit tests in `ts/*.test.ts` and Python tests for @@ -72,59 +107,6 @@ and prevent regressions. 
**Priority:** Low-Medium **Status:** Not started -#### Move Sync Logic to Coordinator - -**Current Architecture Issues:** - -- In-sync binary sensor reads PIN config from text entities via - `_get_entity_state()` -- Reads lock state from coordinator data -- Compares them and triggers sync operations (set_usercode/clear_usercode) -- Cross-entity dependencies create race conditions during startup -- Current guard uses `_attr_is_on is None` to avoid initial sync operations - -**Proposed Solution:** -Move sync logic entirely into the coordinator: - -- Coordinator already has access to both desired state (from config) and actual - state (from lock) -- Coordinator performs sync operations during its `_async_update_data()` cycle -- Binary sensor becomes read-only, just displays coordinator's computed in-sync - status -- Text/number/switch entities remain as config views - -**Example Implementation:** - -```python -# In coordinator._async_update_data() -actual_code = await self.provider.get_usercodes() -desired_code = self.config_entry.data[slot]["pin"] - -if actual_code != desired_code and slot_enabled: - await self.provider.set_usercode(slot, desired_code) - -return {"in_sync": actual_code == desired_code, "actual_code": actual_code} -``` - -**Benefits:** - -- Eliminates cross-entity state reading -- Removes `_initial_state_loaded` flag and startup detection logic -- No race conditions during startup -- Simpler, more centralized sync logic -- Coordinator is single source of truth - -**Considerations:** - -- Major architectural change -- Would need to update binary sensor to be read-only -- Config updates still flow through text/switch entities -- Need to ensure coordinator runs sync on config changes - -**Estimated Effort:** High (16-24 hours) -**Priority:** Medium -**Status:** Not started - ### Convert Config and Internal Dicts to Dataclasses Convert config entry data to typed dataclasses with `from_dict`/`from_entry` diff --git a/custom_components/lock_code_manager/__init__.py b/custom_components/lock_code_manager/__init__.py index 9e468215..f7793d73 100644 --- a/custom_components/lock_code_manager/__init__.py +++ b/custom_components/lock_code_manager/__init__.py @@ -3,6 +3,7 @@ from __future__ import annotations import asyncio +from dataclasses import dataclass import logging from pathlib import Path from typing import Any @@ -403,34 +404,51 @@ async def async_unload_entry( return unload_ok -async def async_update_listener( - hass: HomeAssistant, config_entry: LockCodeManagerConfigEntry -) -> None: - """Update listener.""" - # No need to update if there are no options because that only happens at the end - # of this function - if not config_entry.options: - return +# --------------------------------------------------------------------------- +# Config update listener helpers +# --------------------------------------------------------------------------- - hass_data = hass.data[DOMAIN] - runtime_data = config_entry.runtime_data - ent_reg = er.async_get(hass) - entities_to_remove: set[str] = set() - entities_to_add: set[str] = set() - entry_id = config_entry.entry_id - entry_title = config_entry.title - _LOGGER.info("%s (%s): Creating and/or updating entities", entry_id, entry_title) +@dataclass +class _ConfigDiff: + """Computed differences between old and new configuration.""" + + curr_slots: dict[int, Any] + new_slots: dict[int, Any] + curr_locks: list[str] + new_locks: list[str] + slots_to_add: dict[int, Any] + slots_to_remove: dict[int, Any] + locks_to_add: list[str] + locks_to_remove: 
list[str] - setup_tasks = runtime_data.setup_tasks +def _compute_config_diff(config_entry: LockCodeManagerConfigEntry) -> _ConfigDiff: + """Compute differences between current and new configuration.""" curr_slots: dict[int, Any] = {**config_entry.data.get(CONF_SLOTS, {})} new_slots: dict[int, Any] = {**config_entry.options.get(CONF_SLOTS, {})} curr_locks: list[str] = [*config_entry.data.get(CONF_LOCKS, [])] new_locks: list[str] = [*config_entry.options.get(CONF_LOCKS, [])] - # Set up any platforms that the new slot configs need that haven't already been - # setup + return _ConfigDiff( + curr_slots=curr_slots, + new_slots=new_slots, + curr_locks=curr_locks, + new_locks=new_locks, + slots_to_add={k: v for k, v in new_slots.items() if k not in curr_slots}, + slots_to_remove={k: v for k, v in curr_slots.items() if k not in new_slots}, + locks_to_add=[lock for lock in new_locks if lock not in curr_locks], + locks_to_remove=[lock for lock in curr_locks if lock not in new_locks], + ) + + +async def _setup_new_platforms( + hass: HomeAssistant, + config_entry: LockCodeManagerConfigEntry, + new_slots: dict[int, Any], +) -> None: + """Set up any platforms that the new slot configs need.""" + setup_tasks = config_entry.runtime_data.setup_tasks for platform in { platform for slot_config in new_slots.values() @@ -446,19 +464,17 @@ async def async_update_listener( ) await asyncio.gather(*setup_tasks.values()) - # Identify changes that need to be made - slots_to_add: dict[int, Any] = { - k: v for k, v in new_slots.items() if k not in curr_slots - } - slots_to_remove: dict[int, Any] = { - k: v for k, v in curr_slots.items() if k not in new_slots - } - locks_to_add: list[str] = [lock for lock in new_locks if lock not in curr_locks] - locks_to_remove: list[str] = [lock for lock in curr_locks if lock not in new_locks] - callbacks = runtime_data.callbacks +async def _handle_locks_removed( + hass: HomeAssistant, + config_entry: LockCodeManagerConfigEntry, + locks_to_remove: list[str], +) -> None: + """Handle removal of locks from config.""" + entry_id = config_entry.entry_id + entry_title = config_entry.title + callbacks = config_entry.runtime_data.callbacks - # Remove old lock entities (slot sensors) for lock_entity_id in locks_to_remove: _LOGGER.debug( "%s (%s): Removing lock %s entities", entry_id, entry_title, lock_entity_id @@ -474,92 +490,116 @@ async def async_update_listener( hass, config_entry, lock_entity_id=lock_entity_id, remove_permanently=True ) - # Notify any existing entities that additional locks have been added then create - # slot PIN sensors for the new locks - if locks_to_add: - _LOGGER.debug( - "%s (%s): Adding following locks: %s", - entry_id, - entry_title, - locks_to_add, - ) - added_locks: list[BaseLock] = [] - for lock_entity_id in locks_to_add: - if lock_entity_id in hass_data[CONF_LOCKS]: - _LOGGER.debug( - "%s (%s): Reusing lock instance for lock %s", - entry_id, - entry_title, - hass_data[CONF_LOCKS][lock_entity_id], - ) - lock = runtime_data.locks[lock_entity_id] = hass_data[CONF_LOCKS][ - lock_entity_id - ] - else: - lock = hass_data[CONF_LOCKS][lock_entity_id] = runtime_data.locks[ - lock_entity_id - ] = async_create_lock_instance( - hass, - dr.async_get(hass), - ent_reg, - config_entry, - lock_entity_id, - ) - _LOGGER.debug( - "%s (%s): Creating lock instance for lock %s", - entry_id, - entry_title, - lock, - ) - await lock.async_setup(config_entry) - - added_locks.append(lock) - - # Check if lock is connected (but don't wait - entity creation doesn't require it) - if not 
await lock.async_internal_is_connection_up(): - _LOGGER.debug( - "%s (%s): Lock %s is not connected yet. Entities will be created " - "but will be unavailable until the lock comes online. This is normal " - "during startup if Z-Wave JS is still initializing.", - entry_id, - entry_title, - lock.lock.entity_id, - ) - - for slot_num in new_slots: - _LOGGER.debug( - "%s (%s): Adding lock %s slot %s sensor and event entity", - entry_id, - entry_title, - lock_entity_id, - slot_num, - ) - callbacks.invoke_lock_slot_adders(lock, slot_num, ent_reg) - - # Notify existing entities about the new locks - if added_locks: - callbacks.invoke_lock_added_handlers(added_locks) - - # Remove slot sensors that are no longer in the config - for slot_num in slots_to_remove.keys(): + +async def _handle_locks_added( + hass: HomeAssistant, + config_entry: LockCodeManagerConfigEntry, + locks_to_add: list[str], + new_slots: dict[int, Any], + ent_reg: er.EntityRegistry, +) -> None: + """Handle addition of new locks to config.""" + if not locks_to_add: + return + + entry_id = config_entry.entry_id + entry_title = config_entry.title + hass_data = hass.data[DOMAIN] + runtime_data = config_entry.runtime_data + callbacks = runtime_data.callbacks + + _LOGGER.debug( + "%s (%s): Adding following locks: %s", entry_id, entry_title, locks_to_add + ) + added_locks: list[BaseLock] = [] + + for lock_entity_id in locks_to_add: + if lock_entity_id in hass_data[CONF_LOCKS]: + _LOGGER.debug( + "%s (%s): Reusing lock instance for lock %s", + entry_id, + entry_title, + hass_data[CONF_LOCKS][lock_entity_id], + ) + lock = runtime_data.locks[lock_entity_id] = hass_data[CONF_LOCKS][ + lock_entity_id + ] + else: + lock = hass_data[CONF_LOCKS][lock_entity_id] = runtime_data.locks[ + lock_entity_id + ] = async_create_lock_instance( + hass, + dr.async_get(hass), + ent_reg, + config_entry, + lock_entity_id, + ) + _LOGGER.debug( + "%s (%s): Creating lock instance for lock %s", + entry_id, + entry_title, + lock, + ) + await lock.async_setup(config_entry) + + added_locks.append(lock) + + # Check if lock is connected (but don't wait - entity creation doesn't require it) + if not await lock.async_internal_is_connection_up(): + _LOGGER.debug( + "%s (%s): Lock %s is not connected yet. Entities will be created " + "but will be unavailable until the lock comes online. This is normal " + "during startup if Z-Wave JS is still initializing.", + entry_id, + entry_title, + lock.lock.entity_id, + ) + + for slot_num in new_slots: + _LOGGER.debug( + "%s (%s): Adding lock %s slot %s sensor and event entity", + entry_id, + entry_title, + lock_entity_id, + slot_num, + ) + callbacks.invoke_lock_slot_adders(lock, slot_num, ent_reg) + + # Notify existing entities about the new locks + if added_locks: + callbacks.invoke_lock_added_handlers(added_locks) + + +async def _handle_slots_removed( + config_entry: LockCodeManagerConfigEntry, + slots_to_remove: dict[int, Any], +) -> None: + """Handle removal of slots from config.""" + entry_id = config_entry.entry_id + entry_title = config_entry.title + callbacks = config_entry.runtime_data.callbacks + + for slot_num in slots_to_remove: _LOGGER.debug( "%s (%s): Removing slot %s sensors", entry_id, entry_title, slot_num ) await callbacks.invoke_entity_removers_for_slot(slot_num) - # For each new slot, add standard entities and configuration entities. We also - # add slot sensors for existing locks only since new locks were already set up - # above. 
+ +def _handle_slots_added( + config_entry: LockCodeManagerConfigEntry, + slots_to_add: dict[int, Any], + locks_to_add: list[str], + ent_reg: er.EntityRegistry, +) -> None: + """Handle addition of new slots to config.""" + entry_id = config_entry.entry_id + entry_title = config_entry.title + runtime_data = config_entry.runtime_data + callbacks = runtime_data.callbacks + for slot_num, slot_config in slots_to_add.items(): - entities_to_remove.clear() - # First we store the set of entities we are adding so we can track when they are - # done - entities_to_add = { - CONF_ENABLED, - CONF_NAME, - CONF_PIN, - EVENT_PIN_USED, - } + entities_to_add = {CONF_ENABLED, CONF_NAME, CONF_PIN, EVENT_PIN_USED} # Check if we need to add a number of uses entity if slot_config.get(CONF_NUMBER_OF_USES) not in (None, ""): @@ -572,6 +612,7 @@ async def async_update_listener( slot_num, ) callbacks.invoke_standard_adders(slot_num, ent_reg) + for key in entities_to_add: _LOGGER.debug( "%s (%s): Adding %s entity for slot %s", @@ -583,6 +624,7 @@ async def async_update_listener( if key in callbacks.add_keyed_entity: callbacks.invoke_keyed_adders(key, slot_num, ent_reg) + # Add slot sensors for existing locks only (new locks already set up above) for lock_entity_id, lock in runtime_data.locks.items(): if lock_entity_id in locks_to_add: continue @@ -595,50 +637,79 @@ async def async_update_listener( ) callbacks.invoke_lock_slot_adders(lock, slot_num, ent_reg) - # For all slots that are in both the old and new config, check if any of the - # configuration options have changed + +async def _handle_slots_modified( + config_entry: LockCodeManagerConfigEntry, + curr_slots: dict[int, Any], + new_slots: dict[int, Any], + ent_reg: er.EntityRegistry, +) -> None: + """Handle modification of existing slots (number_of_uses changes).""" + entry_id = config_entry.entry_id + entry_title = config_entry.title + callbacks = config_entry.runtime_data.callbacks + for slot_num in set(curr_slots).intersection(new_slots): - entities_to_remove.clear() - entities_to_add.clear() - # Check if number of uses has changed old_val = curr_slots[slot_num].get(CONF_NUMBER_OF_USES) new_val = new_slots[slot_num].get(CONF_NUMBER_OF_USES) - # If number of uses value hasn't changed, skip if old_val == new_val: continue - # If number of uses value has been removed, fire a signal to remove - # corresponding entity + # Number of uses removed if old_val not in (None, "") and new_val in (None, ""): - entities_to_remove.add(CONF_NUMBER_OF_USES) - # If number of uses value has been added, fire a signal to add - # corresponding entity - elif old_val in (None, "") and new_val not in (None, ""): - entities_to_add.add(CONF_NUMBER_OF_USES) - - for key in entities_to_remove: _LOGGER.debug( "%s (%s): Removing %s entity for slot %s due to changed configuration", entry_id, entry_title, - key, + CONF_NUMBER_OF_USES, slot_num, ) - await callbacks.invoke_entity_removers_for_key(slot_num, key) - - for key in entities_to_add: + await callbacks.invoke_entity_removers_for_key( + slot_num, CONF_NUMBER_OF_USES + ) + # Number of uses added + elif old_val in (None, "") and new_val not in (None, ""): _LOGGER.debug( "%s (%s): Adding %s entity for slot %s due to changed configuration", entry_id, entry_title, - key, + CONF_NUMBER_OF_USES, slot_num, ) - callbacks.invoke_keyed_adders(key, slot_num, ent_reg) + callbacks.invoke_keyed_adders(CONF_NUMBER_OF_USES, slot_num, ent_reg) + + +async def async_update_listener( + hass: HomeAssistant, config_entry: LockCodeManagerConfigEntry +) -> None: 
+ """Handle config entry updates by computing diffs and applying changes.""" + # No need to update if there are no options (happens at the end of this function) + if not config_entry.options: + return + + entry_id = config_entry.entry_id + entry_title = config_entry.title + _LOGGER.info("%s (%s): Creating and/or updating entities", entry_id, entry_title) + + # Compute what changed + diff = _compute_config_diff(config_entry) + ent_reg = er.async_get(hass) + + # Set up any new platforms needed + await _setup_new_platforms(hass, config_entry, diff.new_slots) + + # Process changes in order: removals first, then additions + await _handle_locks_removed(hass, config_entry, diff.locks_to_remove) + await _handle_locks_added( + hass, config_entry, diff.locks_to_add, diff.new_slots, ent_reg + ) + await _handle_slots_removed(config_entry, diff.slots_to_remove) + _handle_slots_added(config_entry, diff.slots_to_add, diff.locks_to_add, ent_reg) + await _handle_slots_modified(config_entry, diff.curr_slots, diff.new_slots, ent_reg) - # Existing entities will listen to updates and act on it - new_data = {CONF_LOCKS: new_locks, CONF_SLOTS: new_slots} + # Finalize: update config entry data and clear options + new_data = {CONF_LOCKS: diff.new_locks, CONF_SLOTS: diff.new_slots} _LOGGER.info( "%s (%s): Done creating and/or updating entities", entry_id, entry_title ) diff --git a/custom_components/lock_code_manager/binary_sensor.py b/custom_components/lock_code_manager/binary_sensor.py index 30fb715c..b04353c9 100644 --- a/custom_components/lock_code_manager/binary_sensor.py +++ b/custom_components/lock_code_manager/binary_sensor.py @@ -3,7 +3,6 @@ from __future__ import annotations import asyncio -from collections.abc import Callable from dataclasses import dataclass from datetime import datetime, timedelta import logging @@ -15,7 +14,6 @@ from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.components.text import DOMAIN as TEXT_DOMAIN from homeassistant.const import ( - CONF_ENTITY_ID, CONF_NAME, CONF_PIN, STATE_OFF, @@ -23,6 +21,7 @@ STATE_UNAVAILABLE, STATE_UNKNOWN, EntityCategory, + Platform, ) from homeassistant.core import ( Event, @@ -34,16 +33,16 @@ from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.event import ( TrackStates, - async_call_later, async_track_state_change_event, async_track_state_change_filtered, ) -from homeassistant.helpers.update_coordinator import CoordinatorEntity, UpdateFailed +from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import ( ATTR_ACTIVE, ATTR_CODE, ATTR_IN_SYNC, + CONF_CALENDAR, CONF_NUMBER_OF_USES, DOMAIN, EVENT_PIN_USED, @@ -51,12 +50,10 @@ from .coordinator import LockUsercodeUpdateCoordinator from .data import LockCodeManagerConfigEntry, get_slot_data from .entity import BaseLockCodeManagerCodeSlotPerLockEntity, BaseLockCodeManagerEntity -from .exceptions import LockDisconnected from .providers import BaseLock _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=30) -RETRY_DELAY = timedelta(seconds=10) async def async_setup_entry( @@ -116,20 +113,6 @@ class LockCodeManagerActiveEntity(BaseLockCodeManagerEntity, BinarySensorEntity) """Active binary sensor entity for lock code manager.""" _attr_entity_category = EntityCategory.DIAGNOSTIC - _condition_entity_unsub: Callable[[], None] | None = None - _subscribed_condition_entity_id: str | None = None - - @property - def _condition_entity_id(self) -> str | None: - """Return condition entity ID 
for this slot.""" - return get_slot_data(self.config_entry, self.slot_num).get(CONF_ENTITY_ID) - - @callback - def _cleanup_condition_subscription(self) -> None: - """Clean up condition entity subscription if one exists.""" - if self._condition_entity_unsub: - self._condition_entity_unsub() - self._condition_entity_unsub = None @callback def _update_state(self, _: datetime | None = None) -> None: @@ -146,8 +129,7 @@ def _update_state(self, _: datetime | None = None) -> None: if key in (EVENT_PIN_USED, CONF_NAME, CONF_PIN, ATTR_IN_SYNC): continue - # Handle condition entity - ON means access granted - if key == CONF_ENTITY_ID and (hass_state := self.hass.states.get(state)): + if key == CONF_CALENDAR and (hass_state := self.hass.states.get(state)): states[key] = ( hass_state.state == STATE_ON if hass_state.state in (STATE_ON, STATE_OFF) @@ -178,58 +160,28 @@ async def _config_entry_update_listener( """Update listener.""" if config_entry.options: return - # Re-subscribe if condition entity changed - self._update_condition_entity_subscription() self._update_state() @callback - def _handle_condition_entity_state_change( + def _handle_calendar_state_changes( self, event: Event[EventStateChangedData] ) -> None: - """Handle condition entity state change.""" - self._update_state() - - @callback - def _update_condition_entity_subscription(self) -> None: - """Update subscription for condition entity if it changed.""" - current_entity_id = self._condition_entity_id - old_entity_id = self._subscribed_condition_entity_id - - # No change needed if entity ID hasn't changed - if current_entity_id == old_entity_id: - return - - # Unsubscribe from old entity if we had one - self._cleanup_condition_subscription() - - # Subscribe to new entity if we have one - if current_entity_id: - self._condition_entity_unsub = async_track_state_change_event( - self.hass, - [current_entity_id], - self._handle_condition_entity_state_change, - ) - - self._subscribed_condition_entity_id = current_entity_id - _LOGGER.debug( - "%s (%s): Updated condition entity subscription for %s: %s -> %s", - self.config_entry.entry_id, - self.config_entry.title, - self.entity_id, - old_entity_id, - current_entity_id, - ) + """Handle calendar state changes.""" + if event.data["entity_id"] == self._calendar_entity_id: + self._update_state() async def async_added_to_hass(self) -> None: """Handle entity added to hass.""" await BinarySensorEntity.async_added_to_hass(self) await BaseLockCodeManagerEntity.async_added_to_hass(self) - # Register cleanup for condition entity subscription (called on entity removal) - self.async_on_remove(self._cleanup_condition_subscription) - - # Track state changes for configured condition entity - self._update_condition_entity_subscription() + self.async_on_remove( + async_track_state_change_filtered( + self.hass, + TrackStates(False, set(), {Platform.CALENDAR}), + self._handle_calendar_state_changes, + ).async_remove + ) self.async_on_remove( self.config_entry.add_update_listener(self._config_entry_update_listener) @@ -270,18 +222,18 @@ def __init__( f"{self._get_uid(ATTR_CODE)}|{lock_entity_id}" ) self._lock = asyncio.Lock() - self._attr_is_on: bool | None = None # None means not yet initialized self._tracked_entity_ids: set[str] = set() - self._retry_unsub: Callable[[], None] | None = None - self._retry_active = False - self._state_tracking_unsub: Callable[[], None] | None = None - self._tracking_all_states: bool = False @property def should_poll(self) -> bool: """Return whether entity should poll.""" return True 
+ @property + def is_on(self) -> bool | None: + """Return sync state from coordinator.""" + return self.coordinator.get_sync_state(int(self.slot_num)) + @property def available(self) -> bool: """Return whether binary sensor is available or not.""" @@ -290,16 +242,22 @@ def available(self) -> bool: ) async def async_update(self) -> None: - """Update entity.""" - if self._attr_is_on is None: + """Update entity. + + Called periodically to check sync state and trigger sync if needed. + The coordinator handles the actual sync operations and retries. + """ + # Skip if sync state not yet initialized + if self.is_on is None: return + # Skip if already in sync, lock busy, or lock unavailable if ( self._lock.locked() or self.is_on or not (state := self.hass.states.get(self.lock.lock.entity_id)) or state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN) - or (not self.coordinator.last_update_success and not self._retry_active) + or not self.coordinator.last_update_success ): return @@ -316,69 +274,6 @@ def _get_entity_state(self, key: str) -> str | None: return None return state.state - def _update_sync_state(self, is_on: bool) -> None: - """Update sync state and write to Home Assistant.""" - self._attr_is_on = is_on - self.async_write_ha_state() - - @callback - def _handle_coordinator_update(self) -> None: - """Handle updated data from the coordinator. - - Triggers sync check when coordinator data changes to ensure we sync - when the lock reports different codes than expected (e.g., someone - manually changed a code on the lock). - - Multiple tasks may be queued during rapid updates, but they serialize - via asyncio.Lock and each reads the latest coordinator data when it runs. - After the first successful sync, subsequent tasks exit early (is_on=True). - """ - super()._handle_coordinator_update() - # Skip if not yet initialized or sync already in progress - if self._attr_is_on is not None and not self._lock.locked(): - self.hass.async_create_task( - self._async_update_state(), - name=f"lcm_sync_check_{self.entity_id}", - ) - - def _cancel_retry(self) -> None: - """Cancel any scheduled retry callback.""" - if self._retry_unsub: - self._retry_unsub() - self._retry_unsub = None - self._retry_active = False - - def _schedule_retry(self) -> None: - """Schedule a retry if one isn't already pending.""" - if self._retry_unsub: - return - - _LOGGER.debug( - "%s (%s): Scheduling retry for %s slot %s in %ss", - self.config_entry.entry_id, - self.config_entry.title, - self.lock.lock.entity_id, - self.slot_num, - RETRY_DELAY.total_seconds(), - ) - - self._retry_unsub = async_call_later( - self.hass, - RETRY_DELAY.total_seconds(), - self._handle_retry_callback, - ) - - async def _handle_retry_callback(self, _now: datetime) -> None: - """Handle retry callback.""" - if self._retry_unsub: - self._retry_unsub() - self._retry_unsub = None - self._retry_active = True - try: - await self.async_update() - finally: - self._retry_active = False - def _build_entity_id_map(self) -> bool: """Build and cache entity IDs for this slot.""" missing = False @@ -450,19 +345,13 @@ def _resolve_slot_state( entity_id = event.data["entity_id"] if event else None to_state = event.data["new_state"] if event else None - if not self.coordinator.last_update_success and not self._retry_active: + if not self.coordinator.last_update_success: return None if not self._build_entity_id_map(): return None - # NOTE: We intentionally don't call _update_state_tracking_filter() here. 
- # While it would be nice to switch from tracking all states to only specific - # entities, modifying subscriptions from within a callback causes timing issues. - # The performance impact of tracking all states is minimal since this method - # has early-return guards that skip processing for irrelevant entities. - - if self._attr_is_on is None and int(self.slot_num) not in self.coordinator.data: + if self.is_on is None and int(self.slot_num) not in self.coordinator.data: _LOGGER.debug( "%s (%s): Slot %s not yet in coordinator data, skipping", self.config_entry.entry_id, @@ -497,65 +386,26 @@ def _resolve_slot_state( coordinator_code=coordinator_code, ) - async def _perform_sync_operation(self, slot_state: _SlotState) -> bool: - """Perform sync operation (set or clear usercode). - - Returns True if sync was performed, False if lock disconnected. - """ - try: - if slot_state.active_state == STATE_ON: - await self.lock.async_internal_set_usercode( - int(self.slot_num), slot_state.pin_state, slot_state.name_state - ) - _LOGGER.debug( - "%s (%s): Set usercode for %s slot %s", - self.config_entry.entry_id, - self.config_entry.title, - self.lock.lock.entity_id, - self.slot_num, - ) - else: # active_state == STATE_OFF - await self.lock.async_internal_clear_usercode(int(self.slot_num)) - _LOGGER.debug( - "%s (%s): Cleared usercode for %s slot %s", - self.config_entry.entry_id, - self.config_entry.title, - self.lock.lock.entity_id, - self.slot_num, - ) - self._cancel_retry() - return True - except LockDisconnected as err: - _LOGGER.debug( - "%s (%s): Unable to %s usercode for %s slot %s: %s", - self.config_entry.entry_id, - self.config_entry.title, - "set" if slot_state.active_state == STATE_ON else "clear", - self.lock.lock.entity_id, - self.slot_num, - err, - ) - self._schedule_retry() - return False - async def _async_update_state( self, event: Event[EventStateChangedData] | None = None ) -> None: """Update binary sensor state by checking dependent entity states. - On initial load (when _attr_is_on is None): Sets sync state without operations. - On subsequent updates: Performs sync operations when out of sync. + On initial load (when is_on is None): Sets sync state without operations. + On subsequent updates: Requests sync operations from coordinator when out of sync. + The coordinator handles the actual operations and retry scheduling. 
""" async with self._lock: slot_state = self._resolve_slot_state(event) if slot_state is None: return + slot_num = int(self.slot_num) # Calculate expected sync state expected_in_sync = self._calculate_expected_sync(slot_state) # Initial load: Set sync state without performing operations (prevents startup flapping) - if self._attr_is_on is None: + if self.is_on is None: # Guard: Verify active state is valid if slot_state.active_state not in (STATE_ON, STATE_OFF): _LOGGER.debug( @@ -568,7 +418,11 @@ async def _async_update_state( ) return - self._update_sync_state(expected_in_sync) + # Mark initial sync state in coordinator (both synced and out-of-sync) + if expected_in_sync: + self.coordinator.mark_synced(slot_num) + else: + self.coordinator.mark_out_of_sync(slot_num) _LOGGER.debug( "%s (%s): Initial state loaded for %s slot %s, in_sync=%s", self.config_entry.entry_id, @@ -579,34 +433,19 @@ async def _async_update_state( ) return - # Normal operation: Perform sync if needed + # Normal operation: Request sync via coordinator if needed if not expected_in_sync: - self._update_sync_state(False) - - # Perform sync operation - sync_performed = await self._perform_sync_operation(slot_state) - - # Refresh coordinator to verify operation completed - # Rate limiting at provider level prevents excessive calls - if sync_performed: - try: - await self.coordinator.async_refresh() - except UpdateFailed as err: - _LOGGER.debug( - "%s (%s): Coordinator refresh failed after sync for %s " - "slot %s, scheduling retry: %s", - self.config_entry.entry_id, - self.config_entry.title, - self.lock.lock.entity_id, - self.slot_num, - err, - ) - self._schedule_retry() - - elif not self._attr_is_on: - # Was out of sync, now in sync - self._update_sync_state(True) - self._cancel_retry() + operation = "set" if slot_state.active_state == STATE_ON else "clear" + await self.coordinator.async_request_sync( + slot_num=slot_num, + operation=operation, + usercode=slot_state.pin_state if operation == "set" else None, + name=slot_state.name_state if operation == "set" else None, + ) + + elif not self.is_on: + # Was out of sync, now in sync - mark as synced + self.coordinator.mark_synced(slot_num) async def async_added_to_hass(self) -> None: """Handle entity added to hass.""" @@ -614,55 +453,16 @@ async def async_added_to_hass(self) -> None: await BaseLockCodeManagerCodeSlotPerLockEntity.async_added_to_hass(self) await CoordinatorEntity.async_added_to_hass(self) - # Register cleanup for state tracking subscription - self.async_on_remove(self._cleanup_state_tracking) - - self._setup_state_tracking() - await self._async_update_state() - - @callback - def _cleanup_state_tracking(self) -> None: - """Clean up state tracking subscription if one exists.""" - if self._state_tracking_unsub: - self._state_tracking_unsub() - self._state_tracking_unsub = None - self._tracking_all_states = False - - @callback - def _setup_state_tracking(self) -> None: - """Set up state change tracking for dependent entities. - - If all entity IDs are available, tracks only those specific entities. - Otherwise, tracks all state changes until entities become available. 
- """ - # Clean up existing subscription before setting up new one - self._cleanup_state_tracking() - if self._build_entity_id_map(): - # All entities found - track only those - self._state_tracking_unsub = async_track_state_change_event( - self.hass, self._tracked_entity_ids, self._async_update_state + self.async_on_remove( + async_track_state_change_event( + self.hass, self._tracked_entity_ids, self._async_update_state + ) ) - self._tracking_all_states = False else: - # Some entities not yet registered - track all states temporarily - # _async_update_state has guards that return early if entities aren't ready - tracker = async_track_state_change_filtered( - self.hass, - TrackStates(True, set(), set()), - self._async_update_state, + self.async_on_remove( + async_track_state_change_filtered( + self.hass, TrackStates(True, set(), set()), self._async_update_state + ).async_remove ) - self._state_tracking_unsub = tracker.async_remove - self._tracking_all_states = True - _LOGGER.debug( - "%s (%s): Waiting for dependent entities for %s slot %s, " - "tracking all state changes", - self.config_entry.entry_id, - self.config_entry.title, - self.lock.lock.entity_id, - self.slot_num, - ) - - async def _async_remove(self) -> None: - """Handle removal cleanup.""" - self._cancel_retry() + await self._async_update_state() diff --git a/custom_components/lock_code_manager/coordinator.py b/custom_components/lock_code_manager/coordinator.py index 3f94a927..f3aa9e84 100644 --- a/custom_components/lock_code_manager/coordinator.py +++ b/custom_components/lock_code_manager/coordinator.py @@ -3,22 +3,25 @@ from __future__ import annotations from collections.abc import Callable -from datetime import datetime +from datetime import datetime, timedelta import logging -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Literal from homeassistant.core import HomeAssistant, callback -from homeassistant.helpers.event import async_track_time_interval +from homeassistant.helpers.event import async_call_later, async_track_time_interval from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import DOMAIN -from .exceptions import LockCodeManagerError +from .exceptions import LockCodeManagerError, LockDisconnected if TYPE_CHECKING: from .providers import BaseLock _LOGGER = logging.getLogger(__name__) +# Retry delay for failed sync operations +RETRY_DELAY = timedelta(seconds=10) + class LockUsercodeUpdateCoordinator(DataUpdateCoordinator[dict[int, int | str]]): """Class to manage usercode updates.""" @@ -28,6 +31,10 @@ def __init__(self, hass: HomeAssistant, lock: BaseLock, config_entry: Any) -> No self._lock = lock self._drift_unsub: Callable[[], None] | None = None self._connection_unsub: Callable[[], None] | None = None + # Sync state tracking: slot_num -> is_synced (None = unknown) + self._sync_state: dict[int, bool] = {} + # Pending retry callbacks: slot_num -> cancel function + self._pending_retries: dict[int, Callable[[], None]] = {} # Disable periodic polling when push updates are supported. # Polling is still used for initial load. 
update_interval = None if lock.supports_push else lock.usercode_scan_interval @@ -138,6 +145,153 @@ async def _async_connection_check(self, now: datetime) -> None: "Connection check failed for %s: %s", self._lock.lock.entity_id, err ) + # ========================================================================= + # Sync Operation Methods + # ========================================================================= + + def get_sync_state(self, slot_num: int) -> bool | None: + """Get sync state for a slot. + + Returns: + True if slot is synced, False if out of sync, None if unknown. + + """ + return self._sync_state.get(slot_num) + + @callback + def mark_synced(self, slot_num: int) -> None: + """Mark slot as synced. + + Called by binary sensor when it verifies the slot is in sync. + """ + if self._sync_state.get(slot_num) is not True: + self._sync_state[slot_num] = True + self._cancel_retry(slot_num) + self.async_set_updated_data(self.data) + + @callback + def mark_out_of_sync(self, slot_num: int) -> None: + """Mark slot as out of sync. + + Called by binary sensor when it detects slot is out of sync on initial load. + This does NOT trigger a sync operation - use async_request_sync for that. + """ + if self._sync_state.get(slot_num) is not False: + self._sync_state[slot_num] = False + self.async_set_updated_data(self.data) + + async def async_request_sync( + self, + slot_num: int, + operation: Literal["set", "clear"], + usercode: str | None = None, + name: str | None = None, + ) -> bool: + """Request sync operation for a slot. + + Args: + slot_num: The slot number to sync. + operation: "set" to set usercode, "clear" to clear it. + usercode: The usercode to set (required for "set" operation). + name: Optional name for the slot. + + Returns: + True if operation succeeded, False if it failed (retry scheduled). + + """ + # Cancel any pending retry for this slot - new request takes precedence + self._cancel_retry(slot_num) + + # Mark as out of sync and notify listeners + self._sync_state[slot_num] = False + self.async_set_updated_data(self.data) + + _LOGGER.debug( + "Sync requested for %s slot %s: %s", + self._lock.lock.entity_id, + slot_num, + operation, + ) + + try: + if operation == "set": + if usercode is None: + raise ValueError("usercode is required for 'set' operation") + await self._lock.async_internal_set_usercode(slot_num, usercode, name) + else: + await self._lock.async_internal_clear_usercode(slot_num) + + # Refresh to verify the operation completed + await self.async_request_refresh() + return True + + except LockDisconnected as err: + _LOGGER.debug( + "Sync failed for %s slot %s (%s): %s - scheduling retry", + self._lock.lock.entity_id, + slot_num, + operation, + err, + ) + self._schedule_retry(slot_num, operation, usercode, name) + return False + + except UpdateFailed as err: + _LOGGER.debug( + "Sync verification failed for %s slot %s: %s - scheduling retry", + self._lock.lock.entity_id, + slot_num, + err, + ) + self._schedule_retry(slot_num, operation, usercode, name) + return False + + def _schedule_retry( + self, + slot_num: int, + operation: Literal["set", "clear"], + usercode: str | None, + name: str | None, + ) -> None: + """Schedule retry for failed sync. + + Retries infinitely until: + - Operation succeeds, OR + - A new sync request comes in (replaces pending operation) + + This ensures the latest requested state always takes precedence. + E.g., if set fails and user disables the slot, we switch to clear. 
+ """ + self._cancel_retry(slot_num) + + _LOGGER.debug( + "Scheduling retry for %s slot %s in %ss", + self._lock.lock.entity_id, + slot_num, + RETRY_DELAY.total_seconds(), + ) + + @callback + def _retry_callback(_now: datetime) -> None: + """Handle retry callback.""" + self._pending_retries.pop(slot_num, None) + self.hass.async_create_task( + self.async_request_sync(slot_num, operation, usercode, name), + f"Retry sync for {self._lock.lock.entity_id} slot {slot_num}", + ) + + self._pending_retries[slot_num] = async_call_later( + self.hass, + RETRY_DELAY.total_seconds(), + _retry_callback, + ) + + @callback + def _cancel_retry(self, slot_num: int) -> None: + """Cancel pending retry for slot.""" + if unsub := self._pending_retries.pop(slot_num, None): + unsub() + async def async_shutdown(self) -> None: """Shut down the coordinator and clean up resources.""" if self._drift_unsub: diff --git a/tests/providers/test_base.py b/tests/providers/test_base.py index fe9741e4..9ec5a1ac 100644 --- a/tests/providers/test_base.py +++ b/tests/providers/test_base.py @@ -122,14 +122,14 @@ async def test_config_entry_state_change_resubscribes( lock.subscribe_calls = 0 lock.unsubscribe_calls = 0 - lock.coordinator.async_refresh = AsyncMock() + lock.coordinator.async_request_refresh = AsyncMock() await hass.config_entries.async_reload(mock_lock_config_entry.entry_id) await hass.async_block_till_done() assert lock.unsubscribe_calls == 1 assert lock.subscribe_calls == 1 - lock.coordinator.async_refresh.assert_awaited() + lock.coordinator.async_request_refresh.assert_awaited() await hass.config_entries.async_unload(lcm_config_entry.entry_id) @@ -223,9 +223,19 @@ async def test_rate_limiting_set_usercode( # Reset the last operation time to ensure clean test lock_provider._last_operation_time = 0.0 + # Reset service call tracking to isolate this test from setup-induced calls + # (the coordinator sync logic may trigger calls during startup) + hass.data[LOCK_DATA][LOCK_1_ENTITY_ID]["service_calls"]["set_usercode"] = [] + + # Cancel any pending coordinator retries to prevent interference + for slot_num in list(lock_provider.coordinator._pending_retries.keys()): + lock_provider.coordinator._cancel_retry(slot_num) + + # Use slots 99 and 100 which don't have entities - this prevents the binary + # sensor sync logic from interfering with our rate limiting measurements # First operation should execute immediately start_time = time.monotonic() - await lock_provider.async_internal_set_usercode(1, "1111", "Test 1") + await lock_provider.async_internal_set_usercode(99, "1111", "Test 1") first_duration = time.monotonic() - start_time # First operation should be fast (< 0.2 seconds without rate limiting delay) @@ -233,7 +243,7 @@ async def test_rate_limiting_set_usercode( # Second operation should be rate limited start_time = time.monotonic() - await lock_provider.async_internal_set_usercode(2, "2222", "Test 2") + await lock_provider.async_internal_set_usercode(100, "2222", "Test 2") second_duration = time.monotonic() - start_time # Second operation should take at least the rate limit time diff --git a/tests/test_binary_sensor.py b/tests/test_binary_sensor.py index 71cfa4bb..213c949f 100644 --- a/tests/test_binary_sensor.py +++ b/tests/test_binary_sensor.py @@ -23,7 +23,6 @@ from homeassistant.const import ( ATTR_ENTITY_ID, CONF_ENABLED, - CONF_ENTITY_ID, CONF_NAME, CONF_PIN, SERVICE_TURN_OFF, @@ -38,6 +37,7 @@ from homeassistant.util import dt as dt_util from custom_components.lock_code_manager.const import ( + CONF_CALENDAR, 
CONF_LOCKS, CONF_SLOTS, DOMAIN, @@ -185,7 +185,7 @@ async def test_binary_sensor_entity( assert service_calls.get("set_usercode", []) == initial_set_calls new_config = copy.deepcopy(BASE_CONFIG) - new_config[CONF_SLOTS][2][CONF_ENTITY_ID] = "calendar.test_2" + new_config[CONF_SLOTS][2][CONF_CALENDAR] = "calendar.test_2" hass.config_entries.async_update_entry( lock_code_manager_config_entry, options=new_config @@ -318,11 +318,7 @@ async def test_startup_out_of_sync_slots_sync_once( hass: HomeAssistant, mock_lock_config_entry, ): - """Ensure out-of-sync slots sync once each without extra operations. - - With coordinator-triggered syncs, out-of-sync slots are detected and synced - automatically during startup via coordinator update callbacks. - """ + """Ensure out-of-sync slots sync once each without extra operations.""" # Arrange two slots that need syncing on startup config = { CONF_LOCKS: [LOCK_1_ENTITY_ID], @@ -346,9 +342,17 @@ async def test_startup_out_of_sync_slots_sync_once( assert hass.states.get(in_sync_slot_2) service_calls = hass.data[LOCK_DATA][LOCK_1_ENTITY_ID]["service_calls"] + # No set calls should have happened before we trigger updates + assert service_calls["set_usercode"] == [] + + # Trigger sync for both slots (first update is skipped after initial load) + await async_update_entity(hass, in_sync_slot_1) + await async_update_entity(hass, in_sync_slot_2) + await hass.async_block_till_done() + await async_update_entity(hass, in_sync_slot_1) + await async_update_entity(hass, in_sync_slot_2) + await hass.async_block_till_done() - # Syncs now happen automatically via coordinator update callbacks - # Both slots should have synced exactly once during/after startup set_calls = service_calls["set_usercode"] assert len(set_calls) == 2 assert (1, "9999", "test1") in set_calls @@ -425,12 +429,12 @@ async def test_in_sync_waits_for_missing_pin_state( lock_code_manager_config_entry, ): """Test that in-sync entity waits for dependent entities to report state.""" - entity_component = hass.data["entity_components"]["binary_sensor"] - in_sync_entity_obj = entity_component.get_entity(SLOT_1_IN_SYNC_ENTITY) - assert in_sync_entity_obj is not None + lock_provider = lock_code_manager_config_entry.runtime_data.locks[LOCK_1_ENTITY_ID] + coordinator = lock_provider.coordinator + assert coordinator is not None - # Simulate pre-initialization state - in_sync_entity_obj._attr_is_on = None + # Simulate pre-initialization state by removing sync state from coordinator + coordinator._sync_state.pop(1, None) # Remove the PIN entity state so _ensure_entities_ready() fails hass.states.async_remove(SLOT_1_PIN_ENTITY) @@ -439,8 +443,8 @@ async def test_in_sync_waits_for_missing_pin_state( await async_update_entity(hass, SLOT_1_IN_SYNC_ENTITY) await hass.async_block_till_done() - # Entity should still be waiting on initial state - assert in_sync_entity_obj._attr_is_on is None, ( + # Entity should still be waiting on initial state (sync state is None) + assert coordinator.get_sync_state(1) is None, ( "In-sync sensor should not initialize when PIN state is missing" ) @@ -451,7 +455,7 @@ async def test_in_sync_waits_for_missing_pin_state( await async_update_entity(hass, SLOT_1_IN_SYNC_ENTITY) await hass.async_block_till_done() - assert in_sync_entity_obj._attr_is_on is True, ( + assert coordinator.get_sync_state(1) is True, ( "In-sync sensor should initialize once dependent states are available" ) @@ -619,17 +623,14 @@ async def test_coordinator_refresh_failure_schedules_retry( coordinator = 
lock_provider.coordinator assert coordinator is not None - entity_component = hass.data["entity_components"]["binary_sensor"] - in_sync_entity_obj = entity_component.get_entity(SLOT_1_IN_SYNC_ENTITY) - - # Verify no retry is scheduled initially - assert in_sync_entity_obj._retry_unsub is None + # Verify no retry is scheduled initially (now tracked in coordinator) + assert 1 not in coordinator._pending_retries - # Patch coordinator refresh to fail BEFORE changing PIN - # This way the failure happens during the sync triggered by the PIN change + # Patch coordinator async_request_refresh to raise UpdateFailed + # This simulates a refresh failure after sync operation with patch.object( coordinator, - "async_refresh", + "async_request_refresh", new=AsyncMock(side_effect=UpdateFailed("Connection failed")), ): # Change PIN to trigger sync - coordinator refresh will fail @@ -642,180 +643,10 @@ async def test_coordinator_refresh_failure_schedules_retry( ) await hass.async_block_till_done() - # Retry should be scheduled due to coordinator refresh failure - assert in_sync_entity_obj._retry_unsub is not None, ( - "Retry should be scheduled when coordinator refresh fails after sync" + # Retry should be scheduled in coordinator due to refresh failure + assert 1 in coordinator._pending_retries, ( + "Retry should be scheduled in coordinator when refresh fails after sync" ) # Clean up - cancel the retry - in_sync_entity_obj._cancel_retry() - - -async def test_coordinator_update_triggers_sync_on_external_change( - hass: HomeAssistant, - mock_lock_config_entry, -): - """Test that coordinator updates trigger sync when lock code changes externally. - - This test replicates the issue where someone manually changes a code on the - lock (or the lock reports a different code), and the integration should - automatically sync to restore the configured code. - - Before the fix: coordinator updates only called async_write_ha_state(), - which updated the display but didn't trigger sync operations. - - After the fix: _handle_coordinator_update() triggers _async_update_state() - which performs sync operations when out of sync. 
- """ - # Use config without calendar so both slots are active - config = { - CONF_LOCKS: [LOCK_1_ENTITY_ID], - CONF_SLOTS: { - 1: {CONF_NAME: "test1", CONF_PIN: "1234", CONF_ENABLED: True}, - }, - } - - config_entry = MockConfigEntry( - domain=DOMAIN, - data=config, - unique_id="Test Coordinator Sync", - title="Test LCM", - ) - config_entry.add_to_hass(hass) - await hass.config_entries.async_setup(config_entry.entry_id) - await hass.async_block_till_done() - - # Verify initial state - should be in sync - in_sync_entity = "binary_sensor.test_1_code_slot_1_in_sync" - state = hass.states.get(in_sync_entity) - assert state.state == STATE_ON, "Slot should be in sync initially" - - # Get the lock provider and coordinator - lock_provider = config_entry.runtime_data.locks[LOCK_1_ENTITY_ID] - coordinator = lock_provider.coordinator - service_calls = hass.data[LOCK_DATA][LOCK_1_ENTITY_ID]["service_calls"] - - # Clear any service calls from initial setup - service_calls["set_usercode"].clear() - - # Simulate external change: someone changed the code on the lock to "9999" - # This simulates what happens when the lock reports a different code - hass.data[LOCK_DATA][LOCK_1_ENTITY_ID]["codes"][1] = "9999" - - # Trigger coordinator refresh - this should detect the mismatch and sync - await coordinator.async_refresh() - await hass.async_block_till_done() - - # The fix: coordinator update should have triggered sync to restore "1234" - assert len(service_calls["set_usercode"]) == 1, ( - "Coordinator update should trigger sync when lock code differs from config" - ) - assert service_calls["set_usercode"][0] == (1, "1234", "test1"), ( - "Sync should restore the configured PIN" - ) - - # Verify slot is back in sync - state = hass.states.get(in_sync_entity) - assert state.state == STATE_ON, ( - "Slot should be in sync after coordinator-triggered sync" - ) - - await hass.config_entries.async_unload(config_entry.entry_id) - - -async def test_condition_entity_subscription_updates_on_config_change( - hass: HomeAssistant, - mock_lock_config_entry, -): - """Test that condition entity subscription updates when config entry changes.""" - # Create two input_booleans to use as condition entities - hass.states.async_set("input_boolean.access_1", STATE_ON) - hass.states.async_set("input_boolean.access_2", STATE_OFF) - await hass.async_block_till_done() - - # Set up a slot with the first input_boolean as condition - config = { - CONF_LOCKS: [LOCK_1_ENTITY_ID], - CONF_SLOTS: { - 1: { - CONF_NAME: "test1", - CONF_PIN: "1234", - CONF_ENABLED: True, - CONF_ENTITY_ID: "input_boolean.access_1", - }, - }, - } - - config_entry = MockConfigEntry( - domain=DOMAIN, - data=config, - unique_id="Test Condition Subscription", - title="Test LCM", - ) - config_entry.add_to_hass(hass) - await hass.config_entries.async_setup(config_entry.entry_id) - await hass.async_block_till_done() - - active_entity = "binary_sensor.test_lcm_code_slot_1_active" - - # Initial state: access_1 is ON, so slot should be active - state = hass.states.get(active_entity) - assert state.state == STATE_ON, "Slot should be active when condition entity is ON" - - # Turn off access_1 - slot should become inactive - hass.states.async_set("input_boolean.access_1", STATE_OFF) - await hass.async_block_till_done() - - state = hass.states.get(active_entity) - assert state.state == STATE_OFF, ( - "Slot should be inactive when condition entity is OFF" - ) - - # Turn it back on - hass.states.async_set("input_boolean.access_1", STATE_ON) - await hass.async_block_till_done() - - 
-    state = hass.states.get(active_entity)
-    assert state.state == STATE_ON, "Slot should be active when condition entity is ON"
-
-    # Now update the config entry to use a different condition entity
-    new_config = copy.deepcopy(config)
-    new_config[CONF_SLOTS][1][CONF_ENTITY_ID] = "input_boolean.access_2"
-
-    hass.config_entries.async_update_entry(config_entry, data=new_config)
-    await hass.async_block_till_done()
-
-    # Now the slot should be inactive because access_2 is OFF
-    state = hass.states.get(active_entity)
-    assert state.state == STATE_OFF, (
-        "Slot should be inactive after switching to condition entity that is OFF"
-    )
-
-    # The slot should now react to access_2, NOT access_1
-    # Turn on access_2 - slot should become active
-    hass.states.async_set("input_boolean.access_2", STATE_ON)
-    await hass.async_block_till_done()
-
-    state = hass.states.get(active_entity)
-    assert state.state == STATE_ON, "Slot should react to new condition entity"
-
-    # Turn off access_2
-    hass.states.async_set("input_boolean.access_2", STATE_OFF)
-    await hass.async_block_till_done()
-
-    state = hass.states.get(active_entity)
-    assert state.state == STATE_OFF, (
-        "Slot should react to new condition entity being OFF"
-    )
-
-    # Verify the slot does NOT react to the old condition entity anymore
-    # Turn on access_1 - slot should stay inactive
-    hass.states.async_set("input_boolean.access_1", STATE_ON)
-    await hass.async_block_till_done()
-
-    state = hass.states.get(active_entity)
-    assert state.state == STATE_OFF, (
-        "Slot should NOT react to old condition entity after config change"
-    )
-
-    await hass.config_entries.async_unload(config_entry.entry_id)
+    coordinator._cancel_retry(1)
diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py
index a04a5dab..1a824364 100644
--- a/tests/test_coordinator.py
+++ b/tests/test_coordinator.py
@@ -4,6 +4,7 @@
 from datetime import timedelta
 from unittest.mock import AsyncMock, patch
 
+import pytest
 from pytest_homeassistant_custom_component.common import MockConfigEntry
 
 from homeassistant.core import HomeAssistant, callback
@@ -641,3 +642,433 @@ async def test_drift_check_handles_hard_refresh_error(
 
     # Data should remain unchanged
     assert coordinator.data == {1: "1234"}
+
+
+# =========================================================================
+# Sync Operation Tests
+# =========================================================================
+
+
+async def test_get_sync_state_returns_none_for_unknown_slot(
+    hass: HomeAssistant,
+):
+    """Test that get_sync_state returns None for slots not in _sync_state."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+
+    # No sync state set, should return None
+    assert coordinator.get_sync_state(1) is None
+    assert coordinator.get_sync_state(99) is None
+
+
+async def test_get_sync_state_returns_correct_state(
+    hass: HomeAssistant,
+):
+    """Test that get_sync_state returns the correct sync state."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+
+    # Set internal state directly
+    coordinator._sync_state[1] = True
+    coordinator._sync_state[2] = False
+
+    assert coordinator.get_sync_state(1) is True
+    assert coordinator.get_sync_state(2) is False
+
+
+async def test_mark_synced_sets_state_and_notifies(
+    hass: HomeAssistant,
+):
+    """Test that mark_synced sets sync state and notifies listeners."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+
+    listener_called = False
+
+    @callback
+    def listener():
+        nonlocal listener_called
+        listener_called = True
+
+    coordinator.async_add_listener(listener)
+
+    # Mark slot 1 as synced
+    coordinator.mark_synced(1)
+
+    assert coordinator.get_sync_state(1) is True
+    assert listener_called
+
+
+async def test_mark_synced_cancels_pending_retry(
+    hass: HomeAssistant,
+):
+    """Test that mark_synced cancels any pending retry for the slot."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+
+    # Simulate a pending retry
+    cancel_called = False
+
+    def fake_cancel():
+        nonlocal cancel_called
+        cancel_called = True
+
+    coordinator._pending_retries[1] = fake_cancel
+
+    # Mark as synced should cancel the pending retry
+    coordinator.mark_synced(1)
+
+    assert cancel_called
+    assert 1 not in coordinator._pending_retries
+
+
+async def test_mark_synced_no_op_if_already_synced(
+    hass: HomeAssistant,
+):
+    """Test that mark_synced doesn't notify if already synced."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+    coordinator._sync_state[1] = True  # Already synced
+
+    listener_called = False
+
+    @callback
+    def listener():
+        nonlocal listener_called
+        listener_called = True
+
+    coordinator.async_add_listener(listener)
+
+    # Mark synced again - should be a no-op
+    coordinator.mark_synced(1)
+
+    assert not listener_called
+
+
+async def test_async_request_sync_set_operation_success(
+    hass: HomeAssistant,
+):
+    """Test async_request_sync for a successful set operation."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: ""}
+
+    mock_set = AsyncMock()
+    mock_refresh = AsyncMock()
+
+    with (
+        patch.object(lock, "async_internal_set_usercode", mock_set),
+        patch.object(coordinator, "async_request_refresh", mock_refresh),
+    ):
+        result = await coordinator.async_request_sync(1, "set", "1234", "Test User")
+
+    assert result is True
+    mock_set.assert_called_once_with(1, "1234", "Test User")
+    mock_refresh.assert_called_once()
+
+
+async def test_async_request_sync_clear_operation_success(
+    hass: HomeAssistant,
+):
+    """Test async_request_sync for a successful clear operation."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+
+    mock_clear = AsyncMock()
+    mock_refresh = AsyncMock()
+
+    with (
+        patch.object(lock, "async_internal_clear_usercode", mock_clear),
+        patch.object(coordinator, "async_request_refresh", mock_refresh),
+    ):
+        result = await coordinator.async_request_sync(1, "clear")
+
+    assert result is True
+    mock_clear.assert_called_once_with(1)
+    mock_refresh.assert_called_once()
+
+
+async def test_async_request_sync_marks_out_of_sync_immediately(
+    hass: HomeAssistant,
+):
+    """Test that async_request_sync marks slot as out of sync before operation."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+    coordinator._sync_state[1] = True  # Start as synced
+
+    sync_state_during_operation = None
+
+    async def capture_state(*args, **kwargs):
+        nonlocal sync_state_during_operation
+        sync_state_during_operation = coordinator.get_sync_state(1)
+
+    with (
+        patch.object(lock, "async_internal_set_usercode", capture_state),
+        patch.object(coordinator, "async_request_refresh", AsyncMock()),
+    ):
+        await coordinator.async_request_sync(1, "set", "5678")
+
+    # During the operation, sync state should have been False
+    assert sync_state_during_operation is False
+
+
+async def test_async_request_sync_cancels_existing_retry(
+    hass: HomeAssistant,
+):
+    """Test that async_request_sync cancels any pending retry for the slot."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+
+    # Simulate a pending retry
+    cancel_called = False
+
+    def fake_cancel():
+        nonlocal cancel_called
+        cancel_called = True
+
+    coordinator._pending_retries[1] = fake_cancel
+
+    with (
+        patch.object(lock, "async_internal_set_usercode", AsyncMock()),
+        patch.object(coordinator, "async_request_refresh", AsyncMock()),
+    ):
+        await coordinator.async_request_sync(1, "set", "5678")
+
+    assert cancel_called
+    # After success, no new retry should be pending
+    assert 1 not in coordinator._pending_retries
+
+
+async def test_async_request_sync_schedules_retry_on_lock_disconnected(
+    hass: HomeAssistant,
+):
+    """Test that async_request_sync schedules retry on LockDisconnected."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+
+    mock_set = AsyncMock(side_effect=LockDisconnected("Lock offline"))
+
+    with patch.object(lock, "async_internal_set_usercode", mock_set):
+        result = await coordinator.async_request_sync(1, "set", "5678")
+
+    assert result is False
+    # A retry should be scheduled
+    assert 1 in coordinator._pending_retries
+
+
+async def test_async_request_sync_raises_on_missing_usercode_for_set(
+    hass: HomeAssistant,
+):
+    """Test that async_request_sync raises ValueError if usercode is None for set."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: ""}
+
+    with pytest.raises(ValueError, match="usercode is required"):
+        await coordinator.async_request_sync(1, "set", None)
+
+
+async def test_cancel_retry_cancels_and_removes(
+    hass: HomeAssistant,
+):
+    """Test that _cancel_retry cancels the callback and removes from dict."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+
+    cancel_called = False
+
+    def fake_cancel():
+        nonlocal cancel_called
+        cancel_called = True
+
+    coordinator._pending_retries[1] = fake_cancel
+
+    coordinator._cancel_retry(1)
+
+    assert cancel_called
+    assert 1 not in coordinator._pending_retries
+
+
+async def test_cancel_retry_no_op_for_unknown_slot(
+    hass: HomeAssistant,
+):
+    """Test that _cancel_retry is a no-op for slots without pending retries."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+
+    # Should not raise
+    coordinator._cancel_retry(99)
+
+
+async def test_new_request_replaces_pending_retry(
+    hass: HomeAssistant,
+):
+    """Test that a new sync request replaces a pending retry with new operation."""
+    entity_reg = er.async_get(hass)
+    config_entry = MockConfigEntry(domain=DOMAIN)
+    config_entry.add_to_hass(hass)
+
+    lock_entity = entity_reg.async_get_or_create(
+        "lock", "test", "test_lock", config_entry=config_entry
+    )
+
+    lock = MockLockWithHardRefresh(
+        hass, dr.async_get(hass), entity_reg, config_entry, lock_entity
+    )
+    coordinator = LockUsercodeUpdateCoordinator(hass, lock, config_entry)
+    coordinator.data = {1: "1234"}
+
+    # First request fails, schedules retry for "set"
+    mock_set = AsyncMock(side_effect=LockDisconnected("Lock offline"))
+    with patch.object(lock, "async_internal_set_usercode", mock_set):
+        await coordinator.async_request_sync(1, "set", "5678")
+
+    old_retry = coordinator._pending_retries.get(1)
+    assert old_retry is not None
+
+    # Second request for "clear" should replace the "set" retry
+    mock_clear = AsyncMock(side_effect=LockDisconnected("Lock offline"))
+    with patch.object(lock, "async_internal_clear_usercode", mock_clear):
+        await coordinator.async_request_sync(1, "clear")
+
+    # Old retry should have been cancelled and replaced
+    new_retry = coordinator._pending_retries.get(1)
+    assert new_retry is not None
+    assert new_retry is not old_retry