From cbe98dcea65fcea0c5836fe6ca8368e7f93582bc Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 18:17:44 +0300 Subject: [PATCH 01/51] Initial asyncio commit --- hazelcast/asyncio/__init__.py | 0 hazelcast/asyncio/client.py | 460 +++++++ hazelcast/internal/__init__.py | 0 hazelcast/internal/asyncio_cluster.py | 390 ++++++ hazelcast/internal/asyncio_compact.py | 162 +++ hazelcast/internal/asyncio_connection.py | 1051 ++++++++++++++++ hazelcast/internal/asyncio_future.py | 5 + hazelcast/internal/asyncio_invocation.py | 445 +++++++ hazelcast/internal/asyncio_listener.py | 295 +++++ hazelcast/internal/asyncio_proxy/__init__.py | 0 hazelcast/internal/asyncio_proxy/base.py | 289 +++++ hazelcast/internal/asyncio_proxy/manager.py | 89 ++ hazelcast/internal/asyncio_proxy/map.py | 967 ++++++++++++++ hazelcast/internal/asyncio_reactor.py | 172 +++ hazelcast/proxy/__init__.py | 16 +- tests/integration/asyncio/__init__.py | 0 .../asyncio/authentication_tests/__init__.py | 0 .../authentication_test.py | 62 + .../authentication_tests/hazelcast-token.xml | 26 + .../hazelcast-user-pass.xml | 25 + tests/integration/asyncio/backup_acks_test.py | 94 ++ tests/integration/asyncio/base.py | 120 ++ tests/integration/asyncio/client_test.py | 142 +++ tests/integration/asyncio/proxy/__init__.py | 0 tests/integration/asyncio/proxy/hazelcast.xml | 47 + tests/integration/asyncio/proxy/map_test.py | 1121 +++++++++++++++++ 26 files changed, 5968 insertions(+), 10 deletions(-) create mode 100644 hazelcast/asyncio/__init__.py create mode 100644 hazelcast/asyncio/client.py create mode 100644 hazelcast/internal/__init__.py create mode 100644 hazelcast/internal/asyncio_cluster.py create mode 100644 hazelcast/internal/asyncio_compact.py create mode 100644 hazelcast/internal/asyncio_connection.py create mode 100644 hazelcast/internal/asyncio_future.py create mode 100644 hazelcast/internal/asyncio_invocation.py create mode 100644 hazelcast/internal/asyncio_listener.py create mode 100644 hazelcast/internal/asyncio_proxy/__init__.py create mode 100644 hazelcast/internal/asyncio_proxy/base.py create mode 100644 hazelcast/internal/asyncio_proxy/manager.py create mode 100644 hazelcast/internal/asyncio_proxy/map.py create mode 100644 hazelcast/internal/asyncio_reactor.py create mode 100644 tests/integration/asyncio/__init__.py create mode 100644 tests/integration/asyncio/authentication_tests/__init__.py create mode 100644 tests/integration/asyncio/authentication_tests/authentication_test.py create mode 100644 tests/integration/asyncio/authentication_tests/hazelcast-token.xml create mode 100644 tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml create mode 100644 tests/integration/asyncio/backup_acks_test.py create mode 100644 tests/integration/asyncio/base.py create mode 100644 tests/integration/asyncio/client_test.py create mode 100644 tests/integration/asyncio/proxy/__init__.py create mode 100644 tests/integration/asyncio/proxy/hazelcast.xml create mode 100644 tests/integration/asyncio/proxy/map_test.py diff --git a/hazelcast/asyncio/__init__.py b/hazelcast/asyncio/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py new file mode 100644 index 0000000000..e54aa14cff --- /dev/null +++ b/hazelcast/asyncio/client.py @@ -0,0 +1,460 @@ +import asyncio +import logging +import sys +import typing + +from hazelcast.internal.asyncio_cluster import ClusterService, _InternalClusterService +from hazelcast.internal.asyncio_compact 
import CompactSchemaService +from hazelcast.config import Config +from hazelcast.internal.asyncio_connection import ConnectionManager, DefaultAddressProvider +from hazelcast.core import DistributedObjectEvent, DistributedObjectInfo +from hazelcast.cp import CPSubsystem, ProxySessionManager +from hazelcast.discovery import HazelcastCloudAddressProvider +from hazelcast.errors import IllegalStateError, InvalidConfigurationError +from hazelcast.internal.asyncio_invocation import InvocationService, Invocation +from hazelcast.lifecycle import LifecycleService, LifecycleState, _InternalLifecycleService +from hazelcast.internal.asyncio_listener import ClusterViewListenerService, ListenerService +from hazelcast.near_cache import NearCacheManager +from hazelcast.partition import PartitionService, _InternalPartitionService +from hazelcast.protocol.codec import ( + client_add_distributed_object_listener_codec, + client_get_distributed_objects_codec, + client_remove_distributed_object_listener_codec, +) +from hazelcast.internal.asyncio_proxy.manager import ( + MAP_SERVICE, + ProxyManager, +) +from hazelcast.internal.asyncio_proxy.base import Proxy +from hazelcast.internal.asyncio_proxy.map import Map +from hazelcast.internal.asyncio_reactor import AsyncioReactor +from hazelcast.serialization import SerializationServiceV1 +from hazelcast.sql import SqlService, _InternalSqlService +from hazelcast.statistics import Statistics +from hazelcast.types import KeyType, ValueType, ItemType, MessageType +from hazelcast.util import AtomicInteger, RoundRobinLB + +__all__ = ("HazelcastClient",) + +from hazelcast.vector import IndexConfig + +_logger = logging.getLogger(__name__) + + +class HazelcastClient: + """Hazelcast client instance to access and manipulate distributed data + structures on the Hazelcast clusters. + """ + + _CLIENT_ID = AtomicInteger() + + @classmethod + async def create_and_start(cls, config: Config = None, **kwargs) -> "HazelcastClient": + client = HazelcastClient(config, **kwargs) + await client._start() + return client + + def __init__(self, config: Config = None, **kwargs): + """The client can be configured either by: + + - providing a configuration object as the first parameter of the + constructor + + .. code:: python + + from hazelcast import HazelcastClient + from hazelcast.config import Config + + config = Config() + config.cluster_name = "a-cluster" + client = HazelcastClient(config) + + - passing configuration options as keyword arguments + + .. code:: python + + from hazelcast import HazelcastClient + + client = HazelcastClient( + cluster_name="a-cluster", + ) + + + See the :class:`hazelcast.config.Config` documentation for the possible + configuration options. + + Args: + config: Optional configuration object. + **kwargs: Optional keyword arguments of the client configuration. + """ + if config: + if kwargs: + raise InvalidConfigurationError( + "Ambiguous client configuration is found. Either provide " + "the config object as the only parameter, or do not " + "pass it and use keyword arguments to configure the " + "client." 
+ ) + else: + config = Config.from_dict(kwargs) + + self._config = config + self._context = _ClientContext() + client_id = HazelcastClient._CLIENT_ID.get_and_increment() + self._name = self._create_client_name(client_id) + self._reactor = AsyncioReactor() + self._serialization_service = SerializationServiceV1(config) + self._near_cache_manager = NearCacheManager(config, self._serialization_service) + self._internal_lifecycle_service = _InternalLifecycleService(config) + self._lifecycle_service = LifecycleService(self._internal_lifecycle_service) + self._internal_cluster_service = _InternalClusterService(self, config) + self._cluster_service = ClusterService(self._internal_cluster_service) + self._invocation_service = InvocationService(self, config, self._reactor) + self._compact_schema_service = CompactSchemaService( + self._serialization_service.compact_stream_serializer, + self._invocation_service, + self._cluster_service, + self._reactor, + self._config, + ) + self._address_provider = self._create_address_provider() + self._internal_partition_service = _InternalPartitionService(self) + self._partition_service = PartitionService( + self._internal_partition_service, + self._serialization_service, + self._compact_schema_service.send_schema_and_retry, + ) + self._connection_manager = ConnectionManager( + self, + config, + self._reactor, + self._address_provider, + self._internal_lifecycle_service, + self._internal_partition_service, + self._internal_cluster_service, + self._invocation_service, + self._near_cache_manager, + self._send_state_to_cluster, + ) + self._load_balancer = self._init_load_balancer(config) + self._listener_service = ListenerService( + self, + config, + self._connection_manager, + self._invocation_service, + self._compact_schema_service, + ) + self._proxy_manager = ProxyManager(self._context) + self._cp_subsystem = CPSubsystem(self._context) + self._proxy_session_manager = ProxySessionManager(self._context) + self._lock_reference_id_generator = AtomicInteger(1) + self._statistics = Statistics( + self, + config, + self._reactor, + self._connection_manager, + self._invocation_service, + self._near_cache_manager, + ) + self._cluster_view_listener = ClusterViewListenerService( + self, + self._connection_manager, + self._internal_partition_service, + self._internal_cluster_service, + self._invocation_service, + ) + self._shutdown_lock = asyncio.Lock() + self._invocation_service.init( + self._internal_partition_service, + self._connection_manager, + self._listener_service, + self._compact_schema_service, + ) + self._internal_sql_service = _InternalSqlService( + self._connection_manager, + self._serialization_service, + self._invocation_service, + self._compact_schema_service.send_schema_and_retry, + ) + self._sql_service = SqlService(self._internal_sql_service) + self._init_context() + + def _init_context(self): + self._context.init_context( + self, + self._config, + self._invocation_service, + self._internal_partition_service, + self._internal_cluster_service, + self._connection_manager, + self._serialization_service, + self._listener_service, + self._proxy_manager, + self._near_cache_manager, + self._lock_reference_id_generator, + self._name, + self._proxy_session_manager, + self._reactor, + self._compact_schema_service, + ) + + async def _start(self): + self._reactor.start() + try: + self._internal_lifecycle_service.start() + self._invocation_service.start() + membership_listeners = self._config.membership_listeners + 
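+            # Membership listeners supplied via the config are registered as part
+            # of the cluster service start, before any connection is opened, so
+            # they observe the initial member list events.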
self._internal_cluster_service.start(self._connection_manager, membership_listeners) + self._cluster_view_listener.start() + await self._connection_manager.start(self._load_balancer) + sync_start = not self._config.async_start + if sync_start: + await self._internal_cluster_service.wait_initial_member_list_fetched() + await self._connection_manager.connect_to_all_cluster_members(sync_start) + self._listener_service.start() + await self._invocation_service.add_backup_listener() + self._load_balancer.init(self._cluster_service) + self._statistics.start() + except Exception: + await self.shutdown() + raise + _logger.info("Client started") + + async def get_map(self, name: str) -> Map[KeyType, ValueType]: + """Returns the distributed map instance with the specified name. + + Args: + name: Name of the distributed map. + + Returns: + Distributed map instance with the specified name. + """ + return await self._proxy_manager.get_or_create(MAP_SERVICE, name) + + + async def add_distributed_object_listener( + self, listener_func: typing.Callable[[DistributedObjectEvent], None] + ) -> str: + """Adds a listener which will be notified when a new distributed object + is created or destroyed. + + Args: + listener_func: Function to be called when a distributed object is + created or destroyed. + + Returns: + A registration id which is used as a key to remove the listener. + """ + is_smart = self._config.smart_routing + codec = client_add_distributed_object_listener_codec + request = codec.encode_request(is_smart) + + def handle_distributed_object_event(name, service_name, event_type, source): + event = DistributedObjectEvent(name, service_name, event_type, source) + listener_func(event) + + def event_handler(client_message): + return codec.handle(client_message, handle_distributed_object_event) + + return await self._listener_service.register_listener( + request, + codec.decode_response, + client_remove_distributed_object_listener_codec.encode_request, + event_handler, + ) + + async def remove_distributed_object_listener(self, registration_id: str) -> bool: + """Removes the specified distributed object listener. + + Returns silently if there is no such listener added before. + + Args: + registration_id: The id of registered listener. + + Returns: + ``True`` if registration is removed, ``False`` otherwise. + """ + return await self._listener_service.deregister_listener(registration_id) + + async def get_distributed_objects(self) -> typing.List[Proxy]: + """Returns all distributed objects such as; queue, map, set, list, + topic, lock, multimap. + + Also, as a side effect, it clears the local instances of the destroyed + proxies. + + Returns: + List of instances created by Hazelcast. 
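+
+        A minimal usage sketch (assumes a started asyncio client bound to the
+        name ``client``; ``service_name`` and ``name`` are assumed to be the
+        attributes exposed by the proxy base class):
+
+        .. code:: python
+
+            objects = await client.get_distributed_objects()
+            for obj in objects:
+                print(obj.service_name, obj.name)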
+ """ + request = client_get_distributed_objects_codec.encode_request() + invocation = Invocation(request, response_handler=lambda m: m) + await self._invocation_service.ainvoke(invocation) + + local_distributed_object_infos = { + DistributedObjectInfo(dist_obj.service_name, dist_obj.name) + for dist_obj in self._proxy_manager.get_distributed_objects() + } + + response = client_get_distributed_objects_codec.decode_response(invocation.future.result()) + async with asyncio.TaskGroup() as tg: + for dist_obj_info in response: + local_distributed_object_infos.discard(dist_obj_info) + tg.create_task(self._proxy_manager.get_or_create( + dist_obj_info.service_name, dist_obj_info.name, create_on_remote=False + )) + + async with asyncio.TaskGroup() as tg: + for dist_obj_info in local_distributed_object_infos: + tg.create_task(self._proxy_manager.destroy_proxy( + dist_obj_info.service_name, dist_obj_info.name, destroy_on_remote=False + )) + + return self._proxy_manager.get_distributed_objects() + + async def shutdown(self) -> None: + """Shuts down this HazelcastClient.""" + async with self._shutdown_lock: + if self._internal_lifecycle_service.running: + self._internal_lifecycle_service.fire_lifecycle_event(LifecycleState.SHUTTING_DOWN) + self._internal_lifecycle_service.shutdown() + self._proxy_session_manager.shutdown().result() + self._near_cache_manager.destroy_near_caches() + await self._connection_manager.shutdown() + self._invocation_service.shutdown() + self._statistics.shutdown() + self._reactor.shutdown() + self._internal_lifecycle_service.fire_lifecycle_event(LifecycleState.SHUTDOWN) + + @property + def name(self) -> str: + """Name of the client.""" + return self._name + + @property + def lifecycle_service(self) -> LifecycleService: + """Lifecycle service allows you to check if the client is running and + add and remove lifecycle listeners. + """ + return self._lifecycle_service + + @property + def partition_service(self) -> PartitionService: + """Partition service allows you to get partition count, introspect + the partition owners, and partition ids of keys. + """ + return self._partition_service + + @property + def cluster_service(self) -> ClusterService: + """ClusterService: Cluster service allows you to get the list of + the cluster members and add and remove membership listeners. + """ + return self._cluster_service + + @property + def cp_subsystem(self) -> CPSubsystem: + """CP Subsystem offers set of in-memory linearizable data structures.""" + return self._cp_subsystem + + @property + def sql(self) -> SqlService: + """Returns a service to execute distributed SQL queries.""" + return self._sql_service + + def _create_address_provider(self): + config = self._config + cluster_members = config.cluster_members + address_list_provided = len(cluster_members) > 0 + cloud_discovery_token = config.cloud_discovery_token + cloud_enabled = cloud_discovery_token is not None + if address_list_provided and cloud_enabled: + raise IllegalStateError( + "Only one discovery method can be enabled at a time. 
" + "Cluster members given explicitly: %s, Hazelcast Cloud enabled: %s" + % (address_list_provided, cloud_enabled) + ) + + if cloud_enabled: + connection_timeout = self._get_connection_timeout(config) + return HazelcastCloudAddressProvider(cloud_discovery_token, connection_timeout) + + return DefaultAddressProvider(cluster_members) + + def _create_client_name(self, client_id): + client_name = self._config.client_name + if client_name: + return client_name + return "hz.client_%s" % client_id + + async def _send_state_to_cluster(self): + return await self._compact_schema_service.send_all_schemas() + + @staticmethod + def _get_connection_timeout(config): + timeout = config.connection_timeout + return sys.maxsize if timeout == 0 else timeout + + @staticmethod + def _init_load_balancer(config): + load_balancer = config.load_balancer + if not load_balancer: + load_balancer = RoundRobinLB() + return load_balancer + + +class _ClientContext: + """ + Context holding all the required services, managers and the configuration + for a Hazelcast client. + """ + + def __init__(self): + self.client = None + self.config = None + self.invocation_service = None + self.partition_service = None + self.cluster_service = None + self.connection_manager = None + self.serialization_service = None + self.listener_service = None + self.proxy_manager = None + self.near_cache_manager = None + self.lock_reference_id_generator = None + self.name = None + self.proxy_session_manager = None + self.reactor = None + self.compact_schema_service = None + + def init_context( + self, + client, + config, + invocation_service, + partition_service, + cluster_service, + connection_manager, + serialization_service, + listener_service, + proxy_manager, + near_cache_manager, + lock_reference_id_generator, + name, + proxy_session_manager, + reactor, + compact_schema_service, + ): + self.client = client + self.config = config + self.invocation_service = invocation_service + self.partition_service = partition_service + self.cluster_service = cluster_service + self.connection_manager = connection_manager + self.serialization_service = serialization_service + self.listener_service = listener_service + self.proxy_manager = proxy_manager + self.near_cache_manager = near_cache_manager + self.lock_reference_id_generator = lock_reference_id_generator + self.name = name + self.proxy_session_manager = proxy_session_manager + self.reactor = reactor + self.compact_schema_service = compact_schema_service diff --git a/hazelcast/internal/__init__.py b/hazelcast/internal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/hazelcast/internal/asyncio_cluster.py b/hazelcast/internal/asyncio_cluster.py new file mode 100644 index 0000000000..28bb024964 --- /dev/null +++ b/hazelcast/internal/asyncio_cluster.py @@ -0,0 +1,390 @@ +import asyncio +import logging +import threading +import typing +import uuid +from collections import OrderedDict + +from hazelcast.core import EndpointQualifier, ProtocolType, MemberInfo, Address +from hazelcast.errors import TargetDisconnectedError, IllegalStateError +from hazelcast.util import check_not_none + +_logger = logging.getLogger(__name__) + + +class _MemberListSnapshot: + __slots__ = ("version", "members") + + def __init__(self, version, members): + self.version = version + self.members = members + + +class ClientInfo: + """Local information of the client. + + Attributes: + uuid: Unique id of this client instance. + address: Local address that is used to communicate with cluster. 
+ name: Name of the client. + labels: Read-only set of all labels of this client. + """ + + __slots__ = ("uuid", "address", "name", "labels") + + def __init__( + self, client_uuid: uuid.UUID, address: Address, name: str, labels: typing.Set[str] + ): + self.uuid = client_uuid + self.address = address + self.name = name + self.labels = labels + + def __repr__(self): + return "ClientInfo(uuid=%s, address=%s, name=%s, labels=%s)" % ( + self.uuid, + self.address, + self.name, + self.labels, + ) + + +_EMPTY_SNAPSHOT = _MemberListSnapshot(-1, OrderedDict()) +_INITIAL_MEMBERS_TIMEOUT_SECONDS = 120 +_CLIENT_ENDPOINT_QUALIFIER = EndpointQualifier(ProtocolType.CLIENT, None) +_MEMBER_ENDPOINT_QUALIFIER = EndpointQualifier(ProtocolType.MEMBER, None) + + +class ClusterService: + """ + Cluster service for Hazelcast clients. + + It provides access to the members in the cluster + and one can register for changes in the cluster members. + """ + + def __init__(self, internal_cluster_service): + self._service = internal_cluster_service + + def add_listener( + self, + member_added: typing.Callable[[MemberInfo], None] = None, + member_removed: typing.Callable[[MemberInfo], None] = None, + fire_for_existing=False, + ) -> str: + """ + Adds a membership listener to listen for membership updates. + + It will be notified when a member is added to the cluster or removed + from the cluster. There is no check for duplicate registrations, + so if you register the listener twice, it will get events twice. + + Args: + member_added: Function to be called when a member is added to the + cluster. + member_removed: Function to be called when a member is removed + from the cluster. + fire_for_existing: Whether or not fire member_added for existing + members. + + Returns: + Registration id of the listener which will be used for removing + this listener. + """ + return self._service.add_listener(member_added, member_removed, fire_for_existing) + + def remove_listener(self, registration_id: str) -> bool: + """ + Removes the specified membership listener. + + Args: + registration_id: Registration id of the listener to be removed. + + Returns: + ``True`` if the registration is removed, ``False`` otherwise. + """ + return self._service.remove_listener(registration_id) + + def get_members( + self, member_selector: typing.Callable[[MemberInfo], bool] = None + ) -> typing.List[MemberInfo]: + """ + Lists the current members in the cluster. + + Every member in the cluster returns the members in the same order. + To obtain the oldest member in the cluster, you can retrieve the first + item in the list. + + Args: + member_selector: Function to filter members to return. If not + provided, the returned list will contain all the available + cluster members. 
+ + Returns: + Current members in the cluster + """ + return self._service.get_members(member_selector) + + +class _InternalClusterService: + def __init__(self, client, config): + self._client = client + self._connection_manager = None + self._labels = frozenset(config.labels) + self._listeners = {} + self._member_list_snapshot = _EMPTY_SNAPSHOT + self._initial_list_fetched = asyncio.Event() + + def start(self, connection_manager, membership_listeners): + self._connection_manager = connection_manager + for listener in membership_listeners: + self.add_listener(*listener) + + def get_member(self, member_uuid): + check_not_none(uuid, "UUID must not be null") + snapshot = self._member_list_snapshot + return snapshot.members.get(member_uuid, None) + + def get_members(self, member_selector=None): + snapshot = self._member_list_snapshot + if not member_selector: + return list(snapshot.members.values()) + + members = [] + for member in snapshot.members.values(): + if member_selector(member): + members.append(member) + return members + + def size(self): + """ + Returns: + int: Size of the cluster. + """ + snapshot = self._member_list_snapshot + return len(snapshot.members) + + def get_local_client(self): + """ + Returns: + hazelcast.cluster.ClientInfo: The client info. + """ + connection_manager = self._connection_manager + connection = connection_manager.get_random_connection() + local_address = None if not connection else connection.local_address + return ClientInfo( + connection_manager.client_uuid, local_address, self._client.name, self._labels + ) + + def add_listener(self, member_added=None, member_removed=None, fire_for_existing=False): + registration_id = str(uuid.uuid4()) + self._listeners[registration_id] = (member_added, member_removed) + + if fire_for_existing and member_added: + snapshot = self._member_list_snapshot + for member in snapshot.members.values(): + member_added(member) + + return registration_id + + def remove_listener(self, registration_id): + try: + self._listeners.pop(registration_id) + return True + except KeyError: + return False + + async def wait_initial_member_list_fetched(self): + """Blocks until the initial member list is fetched from the cluster. + + If it is not received within the timeout, an error is raised. 
+ + Raises: + IllegalStateError: If the member list could not be fetched + """ + try: + await asyncio.wait_for(self._initial_list_fetched.wait(), _INITIAL_MEMBERS_TIMEOUT_SECONDS) + except TimeoutError: + raise IllegalStateError("Could not get initial member list from cluster!") + + def clear_member_list_version(self): + _logger.debug("Resetting the member list version") + + current = self._member_list_snapshot + if current is not _EMPTY_SNAPSHOT: + self._member_list_snapshot = _MemberListSnapshot(0, current.members) + + def clear_member_list(self): + _logger.debug("Resetting the member list") + + current = self._member_list_snapshot + if current is not _EMPTY_SNAPSHOT: + previous_members = current.members + snapshot = _MemberListSnapshot(0, {}) + self._member_list_snapshot = snapshot + dead_members, new_members = self._detect_membership_events( + previous_members, snapshot.members + ) + self._fire_membership_events(dead_members, new_members) + + def handle_members_view_event(self, version, member_infos): + snapshot = self._create_snapshot(version, member_infos) + if _logger.isEnabledFor(logging.DEBUG): + _logger.debug( + "Handling new snapshot with membership version: %s, member string: %s", + version, + self._members_string(snapshot.members), + ) + + current = self._member_list_snapshot + if version > current.version: + self._apply_new_state_and_fire_events(current, snapshot) + + if current is _EMPTY_SNAPSHOT: + self._initial_list_fetched.set() + + def _apply_new_state_and_fire_events(self, current, snapshot): + self._member_list_snapshot = snapshot + dead_members, new_members = self._detect_membership_events( + current.members, snapshot.members + ) + self._fire_membership_events(dead_members, new_members) + + def _fire_membership_events(self, dead_members, new_members): + # Removal events should be fired first + for dead_member in dead_members: + for _, handler in self._listeners.values(): + if handler: + try: + handler(dead_member) + except: + _logger.exception("Exception in membership listener") + + for new_member in new_members: + for handler, _ in self._listeners.values(): + if handler: + try: + handler(new_member) + except: + _logger.exception("Exception in membership listener") + + def _detect_membership_events(self, previous_members, current_members): + new_members = [] + dead_members = set(previous_members.values()) + for member in current_members.values(): + try: + dead_members.remove(member) + except KeyError: + new_members.append(member) + + for dead_member in dead_members: + connection = self._connection_manager.get_connection(dead_member.uuid) + if connection: + connection.close_connection( + None, + TargetDisconnectedError( + "The client has closed the connection to this member, " + "after receiving a member left event from the cluster. " + "%s" % connection + ), + ) + + if (len(new_members) + len(dead_members)) > 0: + if len(current_members) > 0: + _logger.info(self._members_string(current_members)) + + return dead_members, new_members + + @staticmethod + def _members_string(members): + n = len(members) + return "\n\nMembers [%s] {\n\t%s\n}\n" % (n, "\n\t".join(map(str, members.values()))) + + @staticmethod + def _create_snapshot(version, member_infos): + new_members = OrderedDict() + for member_info in member_infos: + address_map = member_info.address_map + if address_map: + address = address_map.get( + _CLIENT_ENDPOINT_QUALIFIER, + address_map.get(_MEMBER_ENDPOINT_QUALIFIER, None), + ) + member_info.address = address + else: + # It might be None on 4.0 servers. 
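+                # Fall back to a minimal address map containing only the
+                # member's known address under the MEMBER endpoint qualifier.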
+ member_info.address_map = { + _MEMBER_ENDPOINT_QUALIFIER: member_info.address, + } + + new_members[member_info.uuid] = member_info + return _MemberListSnapshot(version, new_members) + + +class VectorClock: + """Vector clock consisting of distinct replica logical clocks. + + The vector clock may be read from different thread but concurrent + updates must be synchronized externally. There is no guarantee for + concurrent updates. + + See Also: + https://en.wikipedia.org/wiki/Vector_clock + """ + + def __init__(self): + self._replica_timestamps = {} + + def is_after(self, other: "VectorClock") -> bool: + """Returns ``True`` if this vector clock is causally strictly after the + provided vector clock. This means that it the provided clock is neither + equal to, greater than or concurrent to this vector clock. + + Args: + other: Vector clock to be compared + + Returns: + ``True`` if this vector clock is strictly after the other vector + clock, ``False`` otherwise. + """ + any_timestamp_greater = False + for replica_id, other_timestamp in other.entry_set(): + local_timestamp = self._replica_timestamps.get(replica_id) + + if local_timestamp is None or local_timestamp < other_timestamp: + return False + elif local_timestamp > other_timestamp: + any_timestamp_greater = True + + # there is at least one local timestamp greater or local vector clock has additional timestamps + return any_timestamp_greater or other.size() < self.size() + + def set_replica_timestamp(self, replica_id: str, timestamp: int) -> None: + """Sets the logical timestamp for the given replica ID. + + Args: + replica_id: Replica ID. + timestamp: Timestamp for the given replica ID. + """ + self._replica_timestamps[replica_id] = timestamp + + def entry_set(self) -> typing.List[typing.Tuple[str, int]]: + """Returns the entry set of the replica timestamps in a format of list + of tuples. + + Each tuple contains the replica ID and the timestamp associated with + it. + + Returns: + List of tuples. + """ + return list(self._replica_timestamps.items()) + + def size(self) -> int: + """Returns the number of timestamps that are in the replica timestamps + dictionary. + + Returns: + Number of timestamps in the replica timestamps. 
+ """ + return len(self._replica_timestamps) diff --git a/hazelcast/internal/asyncio_compact.py b/hazelcast/internal/asyncio_compact.py new file mode 100644 index 0000000000..94b22587c4 --- /dev/null +++ b/hazelcast/internal/asyncio_compact.py @@ -0,0 +1,162 @@ +import asyncio +import logging +import typing + +from hazelcast.errors import HazelcastSerializationError, IllegalStateError +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.protocol.codec import ( + client_fetch_schema_codec, + client_send_schema_codec, + client_send_all_schemas_codec, +) + +if typing.TYPE_CHECKING: + from hazelcast.cluster import ClusterService + from hazelcast.config import Config + from hazelcast.protocol.client_message import OutboundMessage + from hazelcast.internal.asyncio_invocation import InvocationService + from hazelcast.internal.asyncio_reactor import AsyncioReactor + from hazelcast.serialization.compact import ( + CompactStreamSerializer, + Schema, + SchemaNotReplicatedError, + ) + +_logger = logging.getLogger(__name__) + + +class CompactSchemaService: + _SEND_SCHEMA_RETRY_COUNT = 100 + + def __init__( + self, + compact_serializer: "CompactStreamSerializer", + invocation_service: "InvocationService", + cluster_service: "ClusterService", + reactor: "AsyncioReactor", + config: "Config", + ): + self._compact_serializer = compact_serializer + self._invocation_service = invocation_service + self._cluster_service = cluster_service + self._reactor = reactor + self._invocation_retry_pause = config.invocation_retry_pause + self._has_replicated_schemas = False + + def fetch_schema(self, schema_id: int) -> asyncio.Future: + _logger.debug( + "Could not find schema with the id %s locally. It will be fetched from the cluster.", + schema_id, + ) + + request = client_fetch_schema_codec.encode_request(schema_id) + fetch_schema_invocation = Invocation( + request, + response_handler=client_fetch_schema_codec.decode_response, + ) + self._invocation_service.invoke(fetch_schema_invocation) + return fetch_schema_invocation.future + + def send_schema_and_retry( + self, + error: "SchemaNotReplicatedError", + func: typing.Callable[..., asyncio.Future], + *args: typing.Any, + **kwargs: typing.Any, + ) -> asyncio.Future: + schema = error.schema + clazz = error.clazz + request = client_send_schema_codec.encode_request(schema) + + def callback(): + self._has_replicated_schemas = True + self._compact_serializer.register_schema_to_type(schema, clazz) + return func(*args, **kwargs) + + return self._replicate_schema( + schema, request, CompactSchemaService._SEND_SCHEMA_RETRY_COUNT, callback + ) + + def _replicate_schema( + self, + schema: "Schema", + request: "OutboundMessage", + remaining_retries: int, + callback: typing.Callable[..., asyncio.Future], + ) -> asyncio.Future: + def continuation(future: asyncio.Future): + replicated_members = future.result() + members = self._cluster_service.get_members() + for member in members: + if member.uuid not in replicated_members: + break + else: + # Loop completed normally. + # All members in our member list all known to have the schema + return callback() + + # There is a member in our member list that the schema + # is not known to be replicated yet. We should retry + # sending it in a random member. + if remaining_retries <= 1: + # We tried to send it a couple of times, but the member list + # in our local and the member list returned by the initiator + # nodes did not match. 
+ raise IllegalStateError( + f"The schema {schema} cannot be replicated in the cluster, " + f"after {CompactSchemaService._SEND_SCHEMA_RETRY_COUNT} retries. " + f"It might be the case that the client is connected to the two " + f"halves of the cluster that is experiencing a split-brain, " + f"and continue putting the data associated with that schema " + f"might result in data loss. It might be possible to replicate " + f"the schema after some time, when the cluster is healed." + ) + + delayed_future: asyncio.Future = asyncio.get_running_loop().create_future() + self._reactor.add_timer( + self._invocation_retry_pause, + lambda: delayed_future.set_result(None), + ) + + def retry(_): + return self._replicate_schema( + schema, request.copy(), remaining_retries - 1, callback + ) + + return delayed_future.add_done_callback(retry) + + fut = self._send_schema_replication_request(request) + fut.add_done_callback(continuation) + return fut + + def _send_schema_replication_request(self, request: "OutboundMessage") -> asyncio.Future: + invocation = Invocation(request, response_handler=client_send_schema_codec.decode_response) + self._invocation_service.invoke(invocation) + return invocation.future + + async def send_all_schemas(self) -> None: + schemas = self._compact_serializer.get_schemas() + if not schemas: + _logger.debug("There is no schema to send to the cluster.") + return None + + _logger.debug("Sending the following schemas to the cluster: %s", schemas) + request = client_send_all_schemas_codec.encode_request(schemas) + invocation = Invocation(request, urgent=True) + self._invocation_service.invoke(invocation) + return await invocation.future + + def register_fetched_schema(self, schema_id: int, schema: typing.Optional["Schema"]) -> None: + if not schema: + raise HazelcastSerializationError( + f"The schema with the id {schema_id} can not be found in the cluster." + ) + + self._compact_serializer.register_schema_to_id(schema) + + def has_replicated_schemas(self): + """ + Returns ``True`` is the client has replicated + any Compact schemas to the cluster. 
+ """ + return self._has_replicated_schemas diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py new file mode 100644 index 0000000000..47b4db05d0 --- /dev/null +++ b/hazelcast/internal/asyncio_connection.py @@ -0,0 +1,1051 @@ +import asyncio +import io +import logging +import random +import struct +import threading +import time +import uuid +from typing import override, Coroutine + +from hazelcast import __version__ +from hazelcast.config import ReconnectMode +from hazelcast.core import ( + AddressHelper, + CLIENT_TYPE, + SERIALIZATION_VERSION, + EndpointQualifier, + ProtocolType, +) +from hazelcast.errors import ( + AuthenticationError, + TargetDisconnectedError, + HazelcastClientNotActiveError, + InvalidConfigurationError, + ClientNotAllowedInClusterError, + IllegalStateError, + ClientOfflineError, +) +from hazelcast.future import ImmediateFuture, ImmediateExceptionFuture +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.lifecycle import LifecycleState +from hazelcast.protocol.client_message import ( + SIZE_OF_FRAME_LENGTH_AND_FLAGS, + Frame, + InboundMessage, + ClientMessageBuilder, +) +from hazelcast.protocol.codec import ( + client_authentication_codec, + client_authentication_custom_codec, + client_ping_codec, +) +from hazelcast.util import ( + AtomicInteger, + calculate_version, + UNKNOWN_VERSION, + member_of_larger_same_version_group, +) + +_logger = logging.getLogger(__name__) + +_INF = float("inf") +_SQL_CONNECTION_RANDOM_ATTEMPTS = 10 +_CLIENT_PUBLIC_ENDPOINT_QUALIFIER = EndpointQualifier(ProtocolType.CLIENT, "public") + + +class WaitStrategy: + def __init__(self, initial_backoff, max_backoff, multiplier, cluster_connect_timeout, jitter): + self._initial_backoff = initial_backoff + self._max_backoff = max_backoff + self._multiplier = multiplier + self._cluster_connect_timeout = cluster_connect_timeout + self._jitter = jitter + self._attempt = None + self._cluster_connect_attempt_begin = None + self._current_backoff = None + + if cluster_connect_timeout == _INF: + self._cluster_connect_timeout_text = "INFINITE" + else: + self._cluster_connect_timeout_text = "%.2fs" % self._cluster_connect_timeout + + def reset(self): + self._attempt = 0 + self._cluster_connect_attempt_begin = time.time() + self._current_backoff = min(self._max_backoff, self._initial_backoff) + + def sleep(self): + self._attempt += 1 + time_passed = time.time() - self._cluster_connect_attempt_begin + if time_passed > self._cluster_connect_timeout: + _logger.warning( + "Unable to get live cluster connection, cluster connect timeout (%s) is reached. 
" + "Attempt %d.", + self._cluster_connect_timeout_text, + self._attempt, + ) + return False + + # random between (-jitter * current_backoff, jitter * current_backoff) + sleep_time = self._current_backoff + self._current_backoff * self._jitter * ( + 2 * random.random() - 1 + ) + sleep_time = min(sleep_time, self._cluster_connect_timeout - time_passed) + _logger.warning( + "Unable to get live cluster connection, retry in %.2fs, attempt: %d, " + "cluster connect timeout: %s, max backoff: %.2fs", + sleep_time, + self._attempt, + self._cluster_connect_timeout_text, + self._max_backoff, + ) + time.sleep(sleep_time) + self._current_backoff = min(self._current_backoff * self._multiplier, self._max_backoff) + return True + + +class AuthenticationStatus: + AUTHENTICATED = 0 + CREDENTIALS_FAILED = 1 + SERIALIZATION_VERSION_MISMATCH = 2 + NOT_ALLOWED_IN_CLUSTER = 3 + + +class ClientState: + INITIAL = 0 + """ + Clients start with this state. + Once a client connects to a cluster, it directly switches to + `INITIALIZED_ON_CLUSTER` instead of `CONNECTED_TO_CLUSTER` because on + startup a client has no local state to send to the cluster. + """ + + CONNECTED_TO_CLUSTER = 1 + """ + When a client switches to a new cluster, it moves to this state. It means + that the client has connected to a new cluster but not sent its local + state to the new cluster yet. + """ + + INITIALIZED_ON_CLUSTER = 2 + """ + When a client sends its local state to the cluster it has connected, it + switches to this state. + Invocations are allowed in this state. + """ + + +class ConnectionManager: + """ConnectionManager is responsible for managing ``Connection`` objects.""" + + def __init__( + self, + client, + config, + reactor, + address_provider, + lifecycle_service, + partition_service, + cluster_service, + invocation_service, + near_cache_manager, + send_state_to_cluster_fn, + ): + self.live = False + self.active_connections = {} # uuid to connection, must be modified under the _lock + self.client_uuid = uuid.uuid4() + + self._client = client + self._config = config + self._reactor = reactor + self._address_provider = address_provider + self._lifecycle_service = lifecycle_service + self._partition_service = partition_service + self._cluster_service = cluster_service + self._invocation_service = invocation_service + self._near_cache_manager = near_cache_manager + self._send_state_to_cluster_fn = send_state_to_cluster_fn + self._client_state = ClientState.INITIAL # must be modified under the _lock + self._established_initial_cluster_connection = False # must be modified under the _lock + self._smart_routing_enabled = config.smart_routing + self._wait_strategy = self._init_wait_strategy(config) + self._reconnect_mode = config.reconnect_mode + self._heartbeat_manager = HeartbeatManager( + self, self._client, config, reactor, invocation_service + ) + self._connection_listeners = [] + self._connect_all_members_timer = None + self._async_start = config.async_start + self._connect_to_cluster_thread_running = False + self._shuffle_member_list = config.shuffle_member_list + self._lock = asyncio.Lock() + self._connection_id_generator = AtomicInteger() + self._labels = frozenset(config.labels) + self._cluster_id = None + self._load_balancer = None + self._use_public_ip = ( + isinstance(address_provider, DefaultAddressProvider) and config.use_public_ip + ) + + def add_listener(self, on_connection_opened=None, on_connection_closed=None): + """Registers a ConnectionListener. 
+ + If the same listener is registered multiple times, it will be notified multiple times. + + Args: + on_connection_opened (function): Function to be called when a connection is opened. (Default value = None) + on_connection_closed (function): Function to be called when a connection is removed. (Default value = None) + """ + self._connection_listeners.append((on_connection_opened, on_connection_closed)) + + def get_connection(self, member_uuid): + return self.active_connections.get(member_uuid, None) + + def get_random_connection(self): + # Try getting the connection from the load balancer, if smart routing is enabled + if self._smart_routing_enabled: + member = self._load_balancer.next() + if member: + connection = self.get_connection(member.uuid) + if connection: + return connection + + # Otherwise iterate over connections and return the first one + for connection in list(self.active_connections.values()): + return connection + + # Failed to get a connection + return None + + def get_random_connection_for_sql(self): + """Returns a random connection for SQL. + + The connection is tried to be selected in the following order. + + - Random connection to a data member from the larger same-version + group. + - Random connection to a data member. + - Any random connection + - ``None``, if there is no connection. + + Returns: + Connection: A random connection for SQL. + """ + if self._smart_routing_enabled: + # There might be a race - the chosen member might be just connected or disconnected. + # Try a couple of times, the member_of_larger_same_version_group returns a random + # connection, we might be lucky... + for _ in range(_SQL_CONNECTION_RANDOM_ATTEMPTS): + members = self._cluster_service.get_members() + member = member_of_larger_same_version_group(members) + if not member: + break + + connection = self.get_connection(member.uuid) + if connection: + return connection + + # Otherwise iterate over connections and return the first one + # that's not to a lite member. + first_connection = None + for member_uuid, connection in list(self.active_connections.items()): + if not first_connection: + first_connection = connection + + member = self._cluster_service.get_member(member_uuid) + if not member or member.lite_member: + continue + + return connection + + # Failed to get a connection to a data member. 
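+        # first_connection is None only when there were no active connections
+        # at all, matching the "``None``, if there is no connection" case in
+        # the docstring above.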
+ return first_connection + + async def start(self, load_balancer): + if self.live: + return + + self.live = True + self._load_balancer = load_balancer + self._heartbeat_manager.start() + await self._connect_to_cluster() + + async def shutdown(self): + if not self.live: + return + + self.live = False + if self._connect_all_members_timer: + self._connect_all_members_timer.cancel() + + self._heartbeat_manager.shutdown() + + # Need to create copy of connection values to avoid modification errors on runtime + async with asyncio.TaskGroup() as tg: + for connection in list(self.active_connections.values()): + tg.create_task(connection.close_connection("Hazelcast client is shutting down", None)) + + self.active_connections.clear() + del self._connection_listeners[:] + + async def connect_to_all_cluster_members(self, sync_start): + if not self._smart_routing_enabled: + return + + if sync_start: + async with asyncio.TaskGroup() as tg: + for member in self._cluster_service.get_members(): + tg.create_task(self._get_or_connect_to_member(member)) + + self._start_connect_all_members_timer() + + async def on_connection_close(self, closed_connection): + remote_uuid = closed_connection.remote_uuid + remote_address = closed_connection.remote_address + + if not remote_address: + _logger.debug( + "Destroying %s, but it has no remote address, hence nothing is " + "removed from the connection dictionary", + closed_connection, + ) + return + + disconnected = False + removed = False + trigger_reconnection = False + async with self._lock: + connection = self.active_connections.get(remote_uuid, None) + if connection == closed_connection: + self.active_connections.pop(remote_uuid, None) + removed = True + _logger.info( + "Removed connection to %s:%s, connection: %s", + remote_address, + remote_uuid, + connection, + ) + + if not self.active_connections: + trigger_reconnection = True + if self._client_state == ClientState.INITIALIZED_ON_CLUSTER: + disconnected = True + + if disconnected: + self._lifecycle_service.fire_lifecycle_event(LifecycleState.DISCONNECTED) + + if trigger_reconnection: + await self._trigger_cluster_reconnection() + + if removed: + async with asyncio.TaskGroup() as tg: + # TODO: see on_connection_open + for _, on_connection_closed in self._connection_listeners: + if on_connection_closed: + try: + maybe_coro = on_connection_closed(closed_connection) + if isinstance(maybe_coro, Coroutine): + tg.create_task(maybe_coro) + except Exception: + _logger.exception("Exception in connection listener") + else: + _logger.debug( + "Destroying %s, but there is no mapping for %s in the connection dictionary", + closed_connection, + remote_uuid, + ) + + def check_invocation_allowed(self): + state = self._client_state + if state == ClientState.INITIALIZED_ON_CLUSTER and self.active_connections: + return + + if state == ClientState.INITIAL: + if self._async_start: + raise ClientOfflineError() + else: + raise IOError("No connection found to cluster since the client is starting.") + elif self._reconnect_mode == ReconnectMode.ASYNC: + raise ClientOfflineError() + else: + raise IOError("No connection found to cluster") + + def initialized_on_cluster(self) -> bool: + """ + Returns ``True`` if the client is initialized on the cluster, by + sending its local state, if necessary. 
+ """ + return self._client_state == ClientState.INITIALIZED_ON_CLUSTER + + async def _get_or_connect_to_address(self, address): + for connection in list(self.active_connections.values()): + if connection.remote_address == address: + return connection + translated = self._translate(address) + connection = await self._create_connection(translated) + response = await self._authenticate(connection) + await self._on_auth(response, connection) + return connection + + async def _get_or_connect_to_member(self, member): + connection = self.active_connections.get(member.uuid, None) + if connection: + return connection + + translated = self._translate_member_address(member) + connection = await self._create_connection(translated) + response = await self._authenticate(connection) #.continue_with(self._on_auth, connection) + await self._on_auth(response, connection) + return connection + + async def _create_connection(self, address): + factory = self._reactor.connection_factory + return await factory( + self, + self._connection_id_generator.get_and_increment(), + address, + self._config, + self._invocation_service.handle_client_message, + ) + + def _translate(self, address): + translated = self._address_provider.translate(address) + if not translated: + raise ValueError( + "Address provider %s could not translate address %s" + % (self._address_provider.__class__.__name__, address) + ) + + return translated + + def _translate_member_address(self, member): + if self._use_public_ip: + public_address = member.address_map.get(_CLIENT_PUBLIC_ENDPOINT_QUALIFIER, None) + if public_address: + return public_address + + return member.address + + return self._translate(member.address) + + async def _trigger_cluster_reconnection(self): + if self._reconnect_mode == ReconnectMode.OFF: + _logger.info("Reconnect mode is OFF. Shutting down the client") + await self._shutdown_client() + return + + if self._lifecycle_service.running: + await self._start_connect_to_cluster_thread() + + def _init_wait_strategy(self, config): + cluster_connect_timeout = config.cluster_connect_timeout + if cluster_connect_timeout == -1: + # If the no timeout is specified by the + # user, or set to -1 explicitly, set + # the timeout to infinite. + cluster_connect_timeout = _INF + + return WaitStrategy( + config.retry_initial_backoff, + config.retry_max_backoff, + config.retry_multiplier, + cluster_connect_timeout, + config.retry_jitter, + ) + + def _start_connect_all_members_timer(self): + connecting_uuids = set() + + async def run(): + if not self._lifecycle_service.running: + return + + async with asyncio.TaskGroup() as tg: + member_uuids = [] + for member in self._cluster_service.get_members(): + member_uuid = member.uuid + if self.active_connections.get(member_uuid, None): + continue + if member_uuid in connecting_uuids: + continue + connecting_uuids.add(member_uuid) + if not self._lifecycle_service.running: + break + # TODO: ERROR:asyncio:Task was destroyed but it is pending! 
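+                    # Connect in the background; the TaskGroup awaits these
+                    # tasks when the `async with` block exits, before run()
+                    # reschedules itself.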
+ tg.create_task(self._get_or_connect_to_member(member)) + member_uuids.append(member_uuid) + + for item in member_uuids: + connecting_uuids.discard(item) + + self._connect_all_members_timer = self._reactor.add_timer(1, lambda: asyncio.create_task(run())) + + self._connect_all_members_timer = self._reactor.add_timer(1, lambda: asyncio.create_task(run())) + + async def _connect_to_cluster(self): + await self._sync_connect_to_cluster() + + async def _start_connect_to_cluster_thread(self): + async with self._lock: + if self._connect_to_cluster_thread_running: + return + + self._connect_to_cluster_thread_running = True + + try: + while True: + await self._sync_connect_to_cluster() + async with self._lock: + if self.active_connections: + self._connect_to_cluster_thread_running = False + return + except Exception: + _logger.exception("Could not connect to any cluster, shutting down the client") + await self._shutdown_client() + + async def _shutdown_client(self): + try: + await self._client.shutdown() + except Exception: + _logger.exception("Exception during client shutdown") + + async def _sync_connect_to_cluster(self): + tried_addresses = set() + self._wait_strategy.reset() + try: + while True: + tried_addresses_per_attempt = set() + members = self._cluster_service.get_members() + if self._shuffle_member_list: + random.shuffle(members) + + for member in members: + self._check_client_active() + tried_addresses_per_attempt.add(member.address) + connection = await self._connect(member, self._get_or_connect_to_member) + if connection: + return + + for address in self._get_possible_addresses(): + self._check_client_active() + if address in tried_addresses_per_attempt: + # We already tried this address on from the member list + continue + + tried_addresses_per_attempt.add(address) + connection = await self._connect(address, self._get_or_connect_to_address) + if connection: + return + + tried_addresses.update(tried_addresses_per_attempt) + + # If the address providers load no addresses (which seems to be possible), + # then the above loop is not entered and the lifecycle check is missing, + # hence we need to repeat the same check at this point. + if not tried_addresses_per_attempt: + self._check_client_active() + + if not self._wait_strategy.sleep(): + break + except (ClientNotAllowedInClusterError, InvalidConfigurationError): + cluster_name = self._config.cluster_name + _logger.exception("Stopped trying on cluster %s", cluster_name) + + cluster_name = self._config.cluster_name + _logger.info( + "Unable to connect to any address from the cluster with name: %s. 
" + "The following addresses were tried: %s", + cluster_name, + tried_addresses, + ) + if self._lifecycle_service.running: + msg = "Unable to connect to any cluster" + else: + msg = "Client is being shutdown" + raise IllegalStateError(msg) + + async def _connect(self, target, get_or_connect_func): + _logger.info("Trying to connect to %s", target) + try: + return await get_or_connect_func(target) + except (ClientNotAllowedInClusterError, InvalidConfigurationError) as e: + _logger.warning("Error during initial connection to %s", target, exc_info=True) + raise e + except Exception: + _logger.warning("Error during initial connection to %s", target, exc_info=True) + return None + + def _authenticate(self, connection) -> asyncio.Future: + client = self._client + cluster_name = self._config.cluster_name + client_name = client.name + if self._config.token_provider: + token = self._config.token_provider.token(connection.connected_address) + request = client_authentication_custom_codec.encode_request( + cluster_name, + token, + self.client_uuid, + CLIENT_TYPE, + SERIALIZATION_VERSION, + __version__, + client_name, + self._labels, + ) + else: + request = client_authentication_codec.encode_request( + cluster_name, + self._config.creds_username, + self._config.creds_password, + self.client_uuid, + CLIENT_TYPE, + SERIALIZATION_VERSION, + __version__, + client_name, + self._labels, + ) + invocation = Invocation( + request, connection=connection, urgent=True, response_handler=lambda m: m + ) + self._invocation_service.invoke(invocation) + return invocation.future + + async def _on_auth(self, response, connection): + try: + response = client_authentication_codec.decode_response(response) + except Exception as e: + await connection.close_connection("Failed to authenticate connection", e) + raise e + + status = response["status"] + if status == AuthenticationStatus.AUTHENTICATED: + return await self._handle_successful_auth(response, connection) + + if status == AuthenticationStatus.CREDENTIALS_FAILED: + err = AuthenticationError("Authentication failed. Check cluster name and credentials.") + elif status == AuthenticationStatus.NOT_ALLOWED_IN_CLUSTER: + err = ClientNotAllowedInClusterError("Client is not allowed in the cluster") + elif status == AuthenticationStatus.SERIALIZATION_VERSION_MISMATCH: + err = IllegalStateError("Server serialization version does not match to client") + else: + err = AuthenticationError( + "Authentication status code not supported. 
status: %s" % status + ) + + await connection.close_connection("Failed to authenticate connection", err) + raise err + + async def _handle_successful_auth(self, response, connection): + async with self._lock: + self._check_partition_count(response["partition_count"]) + + server_version_str = response["server_hazelcast_version"] + remote_address = response["address"] + remote_uuid = response["member_uuid"] + + connection.remote_address = remote_address + connection.server_version = calculate_version(server_version_str) + connection.remote_uuid = remote_uuid + + existing = self.active_connections.get(remote_uuid, None) + if existing: + await connection.close_connection( + "Duplicate connection to same member with UUID: %s" % remote_uuid, None + ) + return existing + + new_cluster_id = response["cluster_id"] + changed_cluster = self._cluster_id is not None and self._cluster_id != new_cluster_id + if changed_cluster: + await self._check_client_state_on_cluster_change(connection) + _logger.warning( + "Switching from current cluster: %s to new cluster: %s", + self._cluster_id, + new_cluster_id, + ) + self._on_cluster_restart() + + is_initial_connection = not self.active_connections + self.active_connections[remote_uuid] = connection + fire_connected_lifecycle_event = False + if is_initial_connection: + self._cluster_id = new_cluster_id + # In split brain, the client might connect to the one half + # of the cluster, and then later might reconnect to the + # other half, after the half it was connected to is + # completely dead. Since the cluster id is preserved in + # split brain scenarios, it is impossible to distinguish + # reconnection to the same cluster vs reconnection to the + # other half of the split brain. However, in the latter, + # we might need to send some state to the other half of + # the split brain (like Compact schemas). That forces us + # to send the client state to the cluster after the first + # cluster connection, regardless the cluster id is + # changed or not. 
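+                # On a reconnection the client may have local state (for example
+                # Compact schemas, as noted above) to re-send; on the very first
+                # cluster connection there is nothing to send yet, so it moves
+                # straight to INITIALIZED_ON_CLUSTER.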
+ if self._established_initial_cluster_connection: + self._client_state = ClientState.CONNECTED_TO_CLUSTER + await self._initialize_on_cluster(new_cluster_id) + else: + fire_connected_lifecycle_event = True + self._established_initial_cluster_connection = True + self._client_state = ClientState.INITIALIZED_ON_CLUSTER + + if fire_connected_lifecycle_event: + self._lifecycle_service.fire_lifecycle_event(LifecycleState.CONNECTED) + + _logger.info( + "Authenticated with server %s:%s, server version: %s, local address: %s", + remote_address, + remote_uuid, + server_version_str, + connection.local_address, + ) + + async with asyncio.TaskGroup() as tg: + for on_connection_opened, _ in self._connection_listeners: + if on_connection_opened: + try: + # TODO: creating the task may not throw the exception + # TODO: protect the loop against exceptions, so all handlers run + maybe_coro = on_connection_opened(connection) + if isinstance(maybe_coro, Coroutine): + tg.create_task(maybe_coro) + except Exception: + _logger.exception("Exception in connection listener") + + if not connection.live: + await self.on_connection_close(connection) + + return connection + + async def _initialize_on_cluster(self, cluster_id) -> None: + # This method is only called in the reactor thread + if cluster_id != self._cluster_id: + _logger.warning( + f"Client won't send the state to the cluster: {cluster_id}" + f"because it switched to a new cluster: {self._cluster_id}" + ) + return + + async def callback(): + try: + if cluster_id == self._cluster_id: + _logger.debug("The client state is sent to the cluster %s", cluster_id) + self._client_state = ClientState.INITIALIZED_ON_CLUSTER + self._lifecycle_service.fire_lifecycle_event(LifecycleState.CONNECTED) + elif _logger.isEnabledFor(logging.DEBUG): + _logger.warning( + "Cannot set client state to 'INITIALIZED_ON_CLUSTER'" + f"because current cluster id: {self._cluster_id}" + f"is different than the expected cluster id: {cluster_id}" + ) + except Exception: + await retry_on_error() + + async def retry_on_error(): + _logger.exception(f"Failure during sending client state to the cluster {cluster_id}") + + if cluster_id != self._cluster_id: + return + + if _logger.isEnabledFor(logging.DEBUG): + _logger.warning(f"Retrying sending client state to the cluster: {cluster_id}") + + await self._initialize_on_cluster(cluster_id) + + try: + await self._send_state_to_cluster_fn() + await callback() + except Exception: + await retry_on_error() + + async def _check_client_state_on_cluster_change(self, connection): + if self.active_connections: + # If there are other connections, we must be connected to the wrong cluster. + # We should not stay connected to this new connection. + # Note that, in some racy scenarios, we might close a connection that + # we can operate on. In those scenarios, we rely on the fact that we will + # reopen the connections. + reason = "Connection does not belong to the cluster %s" % self._cluster_id + await connection.close_connection(reason, None) + raise ValueError(reason) + + def _on_cluster_restart(self): + self._near_cache_manager.clear_near_caches() + self._cluster_service.clear_member_list() + + def _check_partition_count(self, partition_count): + if not self._partition_service.check_and_set_partition_count(partition_count): + raise ClientNotAllowedInClusterError( + "Client can not work with this cluster because it has a " + "different partition count. 
Expected partition count: %d, " + "Member partition count: %d" + % (self._partition_service.partition_count, partition_count) + ) + + def _check_client_active(self): + if not self._lifecycle_service.running: + raise HazelcastClientNotActiveError() + + def _get_possible_addresses(self): + primaries, secondaries = self._address_provider.load_addresses() + if self._shuffle_member_list: + # The relative order between primary and secondary addresses should + # not be changed. So we shuffle the lists separately and then add + # them to the final list so that secondary addresses are not tried + # before all primary addresses have been tried. Otherwise we can get + # startup delays + random.shuffle(primaries) + random.shuffle(secondaries) + + addresses = [] + addresses.extend(primaries) + addresses.extend(secondaries) + return addresses + + +class HeartbeatManager: + + def __init__(self, connection_manager, client, config, reactor, invocation_service): + self._connection_manager = connection_manager + self._client = client + self._reactor = reactor + self._invocation_service = invocation_service + self._heartbeat_timeout = config.heartbeat_timeout + self._heartbeat_interval = config.heartbeat_interval + self._heartbeat_timer = None + + def start(self): + """Starts sending periodic HeartBeat operations.""" + + async def _heartbeat(): + conn_manager = self._connection_manager + if not conn_manager.live: + return + + now = time.time() + async with asyncio.TaskGroup() as tg: + for connection in list(conn_manager.active_connections.values()): + tg.create_task(self._check_connection(now, connection)) + self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat())) + + self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat())) + + def shutdown(self): + """Stops HeartBeat operations.""" + if self._heartbeat_timer: + self._heartbeat_timer.cancel() + + async def _check_connection(self, now, connection): + if not connection.live: + return + + if (now - connection.last_read_time) > self._heartbeat_timeout: + _logger.warning("Heartbeat failed over the connection: %s", connection) + await connection.close_connection( + "Heartbeat timed out", + TargetDisconnectedError("Heartbeat timed out to connection %s" % connection), + ) + return + + if (now - connection.last_write_time) > self._heartbeat_interval: + request = client_ping_codec.encode_request() + invocation = Invocation(request, connection=connection, urgent=True) + self._invocation_service.invoke(invocation) + + +_frame_header = struct.Struct(" asyncio.Future: + future.add_done_callback(callback) diff --git a/hazelcast/internal/asyncio_invocation.py b/hazelcast/internal/asyncio_invocation.py new file mode 100644 index 0000000000..591740faa0 --- /dev/null +++ b/hazelcast/internal/asyncio_invocation.py @@ -0,0 +1,445 @@ +import asyncio +import logging +import time +import functools + +from hazelcast.errors import ( + create_error_from_message, + HazelcastInstanceNotActiveError, + is_retryable_error, + TargetDisconnectedError, + HazelcastClientNotActiveError, + TargetNotMemberError, + EXCEPTION_MESSAGE_TYPE, + IndeterminateOperationStateError, + OperationTimeoutError, + InvocationMightContainCompactDataError, +) +from hazelcast.protocol.client_message import InboundMessage +from hazelcast.protocol.codec import client_local_backup_listener_codec +from hazelcast.util import AtomicInteger +from hazelcast.serialization.compact import SchemaNotFoundError 
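+
+# Typical usage sketch (illustrative only; ``codec``, ``request`` and
+# ``service`` are placeholders, not names defined in this module):
+#
+#     invocation = Invocation(request, response_handler=codec.decode_response)
+#     result = await service.ainvoke(invocation)
+#
+# ``ainvoke`` (defined below) submits the invocation and awaits its future.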
+ +_logger = logging.getLogger(__name__) + + +def _no_op_response_handler(_): + pass + + +class Invocation: + __slots__ = ( + "request", + "timeout", + "partition_id", + "uuid", + "connection", + "event_handler", + "future", + "sent_connection", + "urgent", + "response_handler", + "backup_acks_received", + "backup_acks_expected", + "pending_response", + "pending_response_received_time", + ) + + def __init__( + self, + request, + partition_id=-1, + uuid=None, + connection=None, + event_handler=None, + urgent=False, + timeout=None, + response_handler=_no_op_response_handler, + ): + self.request = request + self.partition_id = partition_id + self.uuid = uuid + self.connection = connection + self.event_handler = event_handler + self.urgent = urgent + self.timeout = timeout + self.future = asyncio.Future() + self.sent_connection = None + self.response_handler = response_handler + self.backup_acks_received = 0 + self.backup_acks_expected = -1 + self.pending_response = None + self.pending_response_received_time = -1 + + +class InvocationService: + _CLEAN_RESOURCES_PERIOD = 0.1 + + def __init__(self, client, config, reactor): + smart_routing = config.smart_routing + if smart_routing: + self._do_invoke = self._invoke_smart + else: + self._do_invoke = self._invoke_non_smart + + self._client = client + self._reactor = reactor + self._partition_service = None + self._connection_manager = None + self._listener_service = None + self._check_invocation_allowed_fn = None + self._pending = {} + self._next_correlation_id = AtomicInteger(1) + self._is_redo_operation = config.redo_operation + self._invocation_timeout = config.invocation_timeout + self._invocation_retry_pause = config.invocation_retry_pause + self._backup_ack_to_client_enabled = smart_routing and config.backup_ack_to_client_enabled + self._fail_on_indeterminate_state = config.fail_on_indeterminate_operation_state + self._backup_timeout = config.operation_backup_timeout + self._clean_resources_timer = None + self._shutdown = False + self._compact_schema_service = None + + def init(self, partition_service, connection_manager, listener_service, compact_schema_service): + self._partition_service = partition_service + self._connection_manager = connection_manager + self._listener_service = listener_service + self._check_invocation_allowed_fn = connection_manager.check_invocation_allowed + self._compact_schema_service = compact_schema_service + + def start(self): + self._start_clean_resources_timer() + + async def add_backup_listener(self): + if self._backup_ack_to_client_enabled: + await self._register_backup_listener() + + def handle_client_message(self, message): + correlation_id = message.get_correlation_id() + + start_frame = message.start_frame + if start_frame.has_event_flag() or start_frame.has_backup_event_flag(): + self._listener_service.handle_client_message(message, correlation_id) + return + + invocation = self._pending.get(correlation_id, None) + if not invocation: + _logger.warning("Got message with unknown correlation id: %s", message) + return + + if message.get_message_type() == EXCEPTION_MESSAGE_TYPE: + error = create_error_from_message(message) + return self._notify_error(invocation, error) + + self._notify(invocation, message) + + def invoke(self, invocation) -> None: + if not invocation.timeout: + invocation.timeout = self._invocation_timeout + time.time() + + correlation_id = self._next_correlation_id.get_and_increment() + request = invocation.request + request.set_correlation_id(correlation_id) + 
request.set_partition_id(invocation.partition_id) + self._do_invoke(invocation) + + async def ainvoke(self, invocation: Invocation): + self.invoke(invocation) + return await invocation.future + + def shutdown(self): + if self._shutdown: + return + + self._shutdown = True + if self._clean_resources_timer: + self._clean_resources_timer.cancel() + for invocation in list(self._pending.values()): + self._notify_error(invocation, HazelcastClientNotActiveError()) + + def _invoke_on_partition_owner(self, invocation, partition_id): + owner_uuid = self._partition_service.get_partition_owner(partition_id) + if not owner_uuid: + _logger.debug("Partition owner is not assigned yet") + return False + return self._invoke_on_target(invocation, owner_uuid) + + def _invoke_on_target(self, invocation, owner_uuid): + connection = self._connection_manager.get_connection(owner_uuid) + if not connection: + _logger.debug("Client is not connected to target: %s", owner_uuid) + return False + return self._send(invocation, connection) + + def _invoke_on_random_connection(self, invocation): + connection = self._connection_manager.get_random_connection() + if not connection: + _logger.debug("No connection found to invoke") + return False + return self._send(invocation, connection) + + def _invoke_smart(self, invocation): + try: + if invocation.urgent: + self._check_urgent_invocation_allowed(invocation) + else: + self._check_invocation_allowed_fn() + + connection = invocation.connection + if connection: + invoked = self._send(invocation, connection) + if not invoked: + self._notify_error( + invocation, IOError("Could not invoke on connection %s" % connection) + ) + return + + if invocation.partition_id != -1: + invoked = self._invoke_on_partition_owner(invocation, invocation.partition_id) + elif invocation.uuid: + invoked = self._invoke_on_target(invocation, invocation.uuid) + else: + invoked = self._invoke_on_random_connection(invocation) + + if not invoked: + invoked = self._invoke_on_random_connection(invocation) + + if not invoked: + self._notify_error(invocation, IOError("No connection found to invoke")) + except Exception as e: + self._notify_error(invocation, e) + + def _invoke_non_smart(self, invocation): + try: + if invocation.urgent: + self._check_urgent_invocation_allowed(invocation) + else: + self._check_invocation_allowed_fn() + + connection = invocation.connection + if connection: + invoked = self._send(invocation, connection) + if not invoked: + self._notify_error( + invocation, IOError("Could not invoke on connection %s" % connection) + ) + return + + if not self._invoke_on_random_connection(invocation): + self._notify_error(invocation, IOError("No connection found to invoke")) + except Exception as e: + self._notify_error(invocation, e) + + def _send(self, invocation, connection): + if self._shutdown: + raise HazelcastClientNotActiveError() + + if self._backup_ack_to_client_enabled: + invocation.request.set_backup_aware_flag() + + message = invocation.request + correlation_id = message.get_correlation_id() + self._pending[correlation_id] = invocation + + if invocation.event_handler: + self._listener_service.add_event_handler(correlation_id, invocation.event_handler) + + if not connection.send_message(message): + if invocation.event_handler: + self._listener_service.remove_event_handler(correlation_id) + return False + + invocation.sent_connection = connection + return True + + def _complete(self, invocation: Invocation, client_message: InboundMessage) -> None: + try: + result = 
invocation.response_handler(client_message) + invocation.future.set_result(result) + except SchemaNotFoundError as e: + self._fetch_schema_and_complete_again(e, invocation, client_message) + return + except Exception as e: + invocation.future.set_exception(e) + + correlation_id = invocation.request.get_correlation_id() + self._pending.pop(correlation_id, None) + + def _complete_with_error(self, invocation, error): + invocation.future.set_exception(error) + correlation_id = invocation.request.get_correlation_id() + self._pending.pop(correlation_id, None) + + def _fetch_schema_and_complete_again( + self, error: SchemaNotFoundError, invocation: Invocation, message: InboundMessage + ) -> None: + schema_id = error.schema_id + + def callback(future): + try: + schema = future.result() + self._compact_schema_service.register_fetched_schema(schema_id, schema) + except Exception as e: + self._complete_with_error(invocation, e) + return + + message.reset_next_frame() + self._complete(invocation, message) + + fetch_schema_future = self._compact_schema_service.fetch_schema(schema_id) + fetch_schema_future.add_done_callback(callback) + + def _notify_error(self, invocation, error): + _logger.debug("Got exception for request %s, error: %s", invocation.request, error) + + if not self._client.lifecycle_service.is_running(): + self._complete_with_error(invocation, HazelcastClientNotActiveError()) + return + + if not self._should_retry(invocation, error): + self._complete_with_error(invocation, error) + return + + if invocation.timeout < time.time(): + _logger.debug("Error will not be retried because invocation timed out: %s", error) + error = OperationTimeoutError( + "Request timed out because an error occurred " + "after invocation timeout: %s" % error + ) + self._complete_with_error(invocation, error) + return + + invocation.sent_connection = None + invoke_func = functools.partial(self._retry_if_not_done, invocation) + self._reactor.add_timer(self._invocation_retry_pause, invoke_func) + + def _retry_if_not_done(self, invocation): + if not invocation.future.done(): + self._do_invoke(invocation) + + def _should_retry(self, invocation, error): + if isinstance(error, InvocationMightContainCompactDataError): + return True + + if invocation.connection and isinstance(error, (IOError, TargetDisconnectedError)): + return False + + if invocation.uuid and isinstance(error, TargetNotMemberError): + return False + + if isinstance(error, (IOError, HazelcastInstanceNotActiveError)) or is_retryable_error( + error + ): + return True + + if isinstance(error, TargetDisconnectedError): + return invocation.request.retryable or self._is_redo_operation + + return False + + def _check_urgent_invocation_allowed(self, invocation: Invocation): + if self._connection_manager.initialized_on_cluster(): + # If the client is initialized on the cluster, that means we + # have sent all the schemas to the cluster, even if we are + # reconnected to it + return + + if not self._compact_schema_service.has_replicated_schemas(): + # If there were no Compact schemas to begin with, we don't need + # to perform the check below. If the client didn't send a Compact + # schema up until this point, the retries or listener registrations + # could not send a schema, because if they were, we wouldn't hit + # this line. + return + + # We are not yet initialized on cluster, so the Compact schemas might + # not be sent yet. This message contains some serialized data, + # and it is possible that it can also contain Compact serialized data. 
+ # In that case, allowing this invocation to go through now could + # violate the invariant that the schema must come to cluster before + # the data. We will retry this invocation and wait until the client + # is initialized on the cluster, which means schemas are replicated + # in the cluster. + if invocation.request.contains_data: + raise InvocationMightContainCompactDataError() + + async def _register_backup_listener(self): + codec = client_local_backup_listener_codec + request = codec.encode_request() + await self._listener_service.register_listener( + request, + codec.decode_response, + lambda reg_id: None, + lambda m: codec.handle(m, self._backup_event_handler), + ) + + def _backup_event_handler(self, correlation_id): + invocation = self._pending.get(correlation_id, None) + if not invocation: + _logger.debug("Invocation not found for backup event, invocation id %s", correlation_id) + return + self._notify_backup_complete(invocation) + + def _notify(self, invocation, client_message): + expected_backups = client_message.get_number_of_backup_acks() + if expected_backups > invocation.backup_acks_received: + invocation.pending_response_received_time = time.time() + invocation.backup_acks_expected = expected_backups + invocation.pending_response = client_message + return + + self._complete(invocation, client_message) + + def _notify_backup_complete(self, invocation): + invocation.backup_acks_received += 1 + if not invocation.pending_response: + return + + if invocation.backup_acks_expected != invocation.backup_acks_received: + return + + self._complete(invocation, invocation.pending_response) + + def _start_clean_resources_timer(self): + def run(): + if self._shutdown: + return + + now = time.time() + for invocation in list(self._pending.values()): + connection = invocation.sent_connection + if not connection: + continue + + if not connection.live: + error = TargetDisconnectedError(connection.close_reason) + self._notify_error(invocation, error) + continue + + if self._backup_ack_to_client_enabled: + self._detect_and_handle_backup_timeout(invocation, now) + + self._clean_resources_timer = self._reactor.add_timer(self._CLEAN_RESOURCES_PERIOD, run) + + self._clean_resources_timer = self._reactor.add_timer(self._CLEAN_RESOURCES_PERIOD, run) + + def _detect_and_handle_backup_timeout(self, invocation, now): + if not invocation.pending_response: + return + + if invocation.backup_acks_expected == invocation.backup_acks_received: + return + + expiration_time = invocation.pending_response_received_time + self._backup_timeout + timeout_reached = 0 < expiration_time < now + if not timeout_reached: + return + + if self._fail_on_indeterminate_state: + error = IndeterminateOperationStateError( + "Invocation failed because the backup acks are missed" + ) + self._complete_with_error(invocation, error) + return + + self._complete(invocation, invocation.pending_response) diff --git a/hazelcast/internal/asyncio_listener.py b/hazelcast/internal/asyncio_listener.py new file mode 100644 index 0000000000..5eb818a791 --- /dev/null +++ b/hazelcast/internal/asyncio_listener.py @@ -0,0 +1,295 @@ +import asyncio +import logging +import sys +import typing +from uuid import uuid4 + +from hazelcast.internal.asyncio_compact import CompactSchemaService +from hazelcast.errors import HazelcastError, HazelcastClientNotActiveError, TargetDisconnectedError +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.internal.asyncio_reactor import AsyncioConnection +from hazelcast.protocol.client_message 
import InboundMessage +from hazelcast.protocol.codec import client_add_cluster_view_listener_codec +from hazelcast.serialization.compact import SchemaNotFoundError +from hazelcast.util import check_not_none + +_logger = logging.getLogger(__name__) + + +class _ListenerRegistration: + __slots__ = ( + "registration_request", + "decode_register_response", + "encode_deregister_request", + "handler", + "connection_registrations", + ) + + def __init__( + self, registration_request, decode_register_response, encode_deregister_request, handler + ): + self.registration_request = registration_request + self.decode_register_response = decode_register_response + self.encode_deregister_request = encode_deregister_request + self.handler = handler + self.connection_registrations = {} # Dict of Connection, EventRegistration + + +class _EventRegistration: + __slots__ = ("server_registration_id", "correlation_id") + + def __init__(self, server_registration_id, correlation_id): + self.server_registration_id = server_registration_id + self.correlation_id = correlation_id + + +class ListenerService: + def __init__( + self, + client, + config, + connection_manager, + invocation_service, + compact_schema_service: CompactSchemaService, + ): + self._client = client + self._connection_manager = connection_manager + self._invocation_service = invocation_service + self._compact_schema_service = compact_schema_service + self._is_smart = config.smart_routing + self._active_registrations: typing.Dict[str, _ListenerRegistration] = {} + self._registration_lock = asyncio.Lock() + self._event_handlers: typing.Dict[int, typing.Callable] = {} + + def start(self): + self._connection_manager.add_listener(self._connection_added, self._connection_removed) + + async def register_listener( + self, registration_request, decode_register_response, encode_deregister_request, handler + ): + async with self._registration_lock: + registration_id = str(uuid4()) + registration = _ListenerRegistration( + registration_request, decode_register_response, encode_deregister_request, handler + ) + self._active_registrations[registration_id] = registration + try: + async with asyncio.TaskGroup() as tg: + for connection in list(self._connection_manager.active_connections.values()): + task = self._register_on_connection(registration_id, registration, connection) + tg.create_task(task) + return registration_id + except Exception: + await self.deregister_listener(registration_id) + raise HazelcastError("Listener cannot be added") + + + async def deregister_listener(self, user_registration_id): + check_not_none(user_registration_id, "None user_registration_id is not allowed!") + async with self._registration_lock: + listener_registration = self._active_registrations.pop(user_registration_id, None) + if not listener_registration: + return False + + async def handle(inv: Invocation, conn: AsyncioConnection): + try: + await inv.future + except Exception as e: + if not isinstance( + e, (HazelcastClientNotActiveError, IOError, TargetDisconnectedError) + ): + _logger.warning( + "Deregistration of listener with ID %s has failed for address %s: %s", + user_registration_id, + conn.remote_address, + e, + ) + + async with asyncio.TaskGroup() as tg: + items = listener_registration.connection_registrations.items() + for connection, event_registration in items: + # Remove local handler + self.remove_event_handler(event_registration.correlation_id) + # The rest is for deleting the remote registration + server_registration_id = event_registration.server_registration_id 
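+                    # Build the remote deregistration request and send it as an
+                    # urgent invocation with an effectively unlimited timeout;
+                    # failures other than shutdown/connection errors are only
+                    # logged by the ``handle`` coroutine above.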
+ deregister_request = listener_registration.encode_deregister_request( + server_registration_id + ) + if deregister_request is None: + # None means no remote registration (e.g. for backup acks) + continue + invocation = Invocation( + deregister_request, connection=connection, timeout=sys.maxsize, urgent=True + ) + self._invocation_service.invoke(invocation) + tg.create_task(handle(invocation, connection)) + + listener_registration.connection_registrations.clear() + return True + + def handle_client_message(self, message: InboundMessage, correlation_id: int): + handler = self._event_handlers.get(correlation_id, None) + if handler: + try: + handler(message) + except SchemaNotFoundError as e: + self._fetch_schema_and_handle_again(e, handler, message) + else: + _logger.debug("Got event message with unknown correlation id: %s", message) + + def _fetch_schema_and_handle_again( + self, + error: SchemaNotFoundError, + handler: typing.Callable[[InboundMessage], None], + message: InboundMessage, + ) -> None: + schema_id = error.schema_id + + def callback(future): + try: + schema = future.result() + self._compact_schema_service.register_fetched_schema(schema_id, schema) + except Exception: + _logger.exception( + f"Failed to call event handler: {handler} with message: {message}" + ) + return + + message.reset_next_frame() + try: + handler(message) + except SchemaNotFoundError as e: + self._fetch_schema_and_handle_again(e, handler, message) + + fetch_schema_future = self._compact_schema_service.fetch_schema(schema_id) + fetch_schema_future.add_done_callback(callback) + + def add_event_handler(self, correlation_id, event_handler): + self._event_handlers[correlation_id] = event_handler + + def remove_event_handler(self, correlation_id): + self._event_handlers.pop(correlation_id, None) + + async def _register_on_connection(self, user_registration_id, listener_registration, connection): + registration_map = listener_registration.connection_registrations + + if connection in registration_map: + return + + registration_request = listener_registration.registration_request.copy() + invocation = Invocation( + registration_request, + connection=connection, + event_handler=listener_registration.handler, + response_handler=lambda m: m, + urgent=True, + ) + self._invocation_service.invoke(invocation) + + def callback(f): + try: + response = f.result() + server_registration_id = listener_registration.decode_register_response(response) + correlation_id = registration_request.get_correlation_id() + registration = _EventRegistration(server_registration_id, correlation_id) + registration_map[connection] = registration + except Exception as e: + if connection.live: + _logger.exception( + "Listener %s can not be added to a new connection: %s", + user_registration_id, + connection, + ) + raise e + + invocation.future.add_done_callback(callback) + return await invocation.future + + async def _connection_added(self, connection): + async with self._registration_lock: + async with asyncio.TaskGroup() as tg: + for user_reg_id, listener_registration in self._active_registrations.items(): + task = self._register_on_connection(user_reg_id, listener_registration, connection) + tg.create_task(task) + + async def _connection_removed(self, connection): + async with self._registration_lock: + for listener_registration in self._active_registrations.values(): + event_registration = listener_registration.connection_registrations.pop( + connection, None + ) + if event_registration: + 
self.remove_event_handler(event_registration.correlation_id) + + +class ClusterViewListenerService: + def __init__( + self, + client, + connection_manager, + partition_service, + cluster_service, + invocation_service, + ): + self._client = client + self._partition_service = partition_service + self._connection_manager = connection_manager + self._cluster_service = cluster_service + self._invocation_service = invocation_service + self._listener_added_connection = None + + def start(self): + self._connection_manager.add_listener(self._connection_added, self._connection_removed) + + def _connection_added(self, connection): + self._try_register(connection) + + def _connection_removed(self, connection): + self._try_register_to_random_connection(connection) + + def _try_register_to_random_connection(self, old_connection): + if self._listener_added_connection is not old_connection: + return + self._listener_added_connection = None + new_connection = self._connection_manager.get_random_connection() + if new_connection: + self._try_register(new_connection) + + def _try_register(self, connection): + if not self._connection_manager.live: + # There is no point on trying the register a backup listener + # if the client is about to shutdown. + return + + if self._listener_added_connection: + return + + self._listener_added_connection = connection + request = client_add_cluster_view_listener_codec.encode_request() + invocation = Invocation( + request, connection=connection, event_handler=self._handler(connection), urgent=True + ) + self._cluster_service.clear_member_list_version() + self._invocation_service.invoke(invocation) + + def callback(f): + try: + f.result() + except Exception: + self._try_register_to_random_connection(connection) + + invocation.future.add_done_callback(callback) + + def _handler(self, connection): + def handle_partitions_view_event(version, partitions): + self._partition_service.handle_partitions_view_event(connection, partitions, version) + + def handle_members_view_event(member_list_version, member_infos): + self._cluster_service.handle_members_view_event(member_list_version, member_infos) + + def inner(message): + client_add_cluster_view_listener_codec.handle( + message, handle_members_view_event, handle_partitions_view_event + ) + + return inner diff --git a/hazelcast/internal/asyncio_proxy/__init__.py b/hazelcast/internal/asyncio_proxy/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/hazelcast/internal/asyncio_proxy/base.py b/hazelcast/internal/asyncio_proxy/base.py new file mode 100644 index 0000000000..861109636e --- /dev/null +++ b/hazelcast/internal/asyncio_proxy/base.py @@ -0,0 +1,289 @@ +import abc +import asyncio +import typing +import uuid + +from hazelcast.core import MemberInfo +from hazelcast.types import KeyType, ValueType, ItemType, MessageType, BlockingProxyType +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.partition import string_partition_strategy +from hazelcast.util import get_attr_name + +MAX_SIZE = float("inf") + + +def _no_op_response_handler(_): + return None + + +class Proxy(typing.Generic[BlockingProxyType], abc.ABC): + """Provides basic functionality for Hazelcast Proxies.""" + + def __init__(self, service_name: str, name: str, context): + self.service_name = service_name + self.name = name + self._context = context + self._invocation_service = context.invocation_service + self._partition_service = context.partition_service + serialization_service = context.serialization_service + 
self._to_object = serialization_service.to_object + self._to_data = serialization_service.to_data + listener_service = context.listener_service + self._register_listener = listener_service.register_listener + self._deregister_listener = listener_service.deregister_listener + self._is_smart = context.config.smart_routing + self._send_schema_and_retry = context.compact_schema_service.send_schema_and_retry + + async def destroy(self) -> bool: + """Destroys this proxy. + + Returns: + ``True`` if this proxy is destroyed successfully, ``False`` + otherwise. + """ + self._on_destroy() + return await self._context.proxy_manager.destroy_proxy(self.service_name, self.name) + + def _on_destroy(self): + pass + + def __repr__(self) -> str: + return '%s(name="%s")' % (type(self).__name__, self.name) + + def _invoke(self, request, response_handler=_no_op_response_handler) -> asyncio.Future: + invocation = Invocation(request, response_handler=response_handler) + self._invocation_service.invoke(invocation) + return invocation.future + + def _invoke_on_target(self, request, uuid, response_handler=_no_op_response_handler) -> asyncio.Future: + invocation = Invocation(request, uuid=uuid, response_handler=response_handler) + self._invocation_service.invoke(invocation) + return invocation.future + + def _invoke_on_key(self, request, key_data, response_handler=_no_op_response_handler) -> asyncio.Future: + partition_id = self._partition_service.get_partition_id(key_data) + invocation = Invocation( + request, partition_id=partition_id, response_handler=response_handler + ) + self._invocation_service.invoke(invocation) + return invocation.future + + def _invoke_on_partition(self, request, partition_id, response_handler=_no_op_response_handler) -> asyncio.Future: + invocation = Invocation( + request, partition_id=partition_id, response_handler=response_handler + ) + self._invocation_service.invoke(invocation) + return invocation.future + + async def _ainvoke_on_partition(self, request, partition_id, response_handler=_no_op_response_handler) -> typing.Any: + fut = self._invoke_on_partition(request, partition_id, response_handler) + return await fut + + +class PartitionSpecificProxy(Proxy[BlockingProxyType], abc.ABC): + """Provides basic functionality for Partition Specific Proxies.""" + + def __init__(self, service_name, name, context): + super(PartitionSpecificProxy, self).__init__(service_name, name, context) + partition_key = context.serialization_service.to_data(string_partition_strategy(self.name)) + self._partition_id = context.partition_service.get_partition_id(partition_key) + + def _invoke(self, request, response_handler=_no_op_response_handler): + invocation = Invocation( + request, partition_id=self._partition_id, response_handler=response_handler + ) + self._invocation_service.invoke(invocation) + return invocation.future + + +class TransactionalProxy: + """Provides an interface for all transactional distributed objects.""" + + def __init__(self, name, transaction, context): + self.name = name + self.transaction = transaction + self._invocation_service = context.invocation_service + serialization_service = context.serialization_service + self._to_object = serialization_service.to_object + self._to_data = serialization_service.to_data + self._send_schema_and_retry = context.compact_schema_service.send_schema_and_retry + + def _send_schema(self, error): + return self._send_schema_and_retry(error, lambda: None).result() + + def _invoke(self, request, response_handler=_no_op_response_handler): + invocation = 
Invocation( + request, connection=self.transaction.connection, response_handler=response_handler + ) + self._invocation_service.invoke(invocation) + return invocation.future.result() + + def __repr__(self): + return '%s(name="%s")' % (type(self).__name__, self.name) + + +class ItemEventType: + """Type of item events.""" + + ADDED = 1 + """ + Fired when an item is added. + """ + + REMOVED = 2 + """ + Fired when an item is removed. + """ + + +class EntryEventType: + """Type of entry event.""" + + ADDED = 1 + """ + Fired if an entry is added. + """ + + REMOVED = 2 + """ + Fired if an entry is removed. + """ + + UPDATED = 4 + """ + Fired if an entry is updated. + """ + + EVICTED = 8 + """ + Fired if an entry is evicted. + """ + + EXPIRED = 16 + """ + Fired if an entry is expired. + """ + + EVICT_ALL = 32 + """ + Fired if all entries are evicted. + """ + + CLEAR_ALL = 64 + """ + Fired if all entries are cleared. + """ + + MERGED = 128 + """ + Fired if an entry is merged after a network partition. + """ + + INVALIDATION = 256 + """ + Fired if an entry is invalidated. + """ + + LOADED = 512 + """ + Fired if an entry is loaded. + """ + + +class ItemEvent(typing.Generic[ItemType]): + """Map Item event. + + Attributes: + name: Name of the proxy that fired the event. + item: The item related to the event. + event_type: Type of the event. + member: Member that fired the event. + """ + + def __init__(self, name: str, item: ItemEventType, event_type: int, member: MemberInfo): + self.name = name + self.item = item + self.event_type = event_type + self.member = member + + +class EntryEvent(typing.Generic[KeyType, ValueType]): + """Map Entry event. + + Attributes: + event_type: Type of the event. + uuid: UUID of the member that fired the event. + number_of_affected_entries: Number of affected entries by this event. + key: The key of this entry event. + value: The value of the entry event. + old_value: The old value of the entry event. + merging_value: The incoming merging value of the entry event. + """ + + def __init__( + self, + key: KeyType, + value: ValueType, + old_value: ValueType, + merging_value: ValueType, + event_type: int, + member_uuid: uuid.UUID, + number_of_affected_entries: int, + ): + self.key = key + self.value = value + self.old_value = old_value + self.merging_value = merging_value + self.event_type = event_type + self.uuid = member_uuid + self.number_of_affected_entries = number_of_affected_entries + + def __repr__(self): + return ( + "EntryEvent(key=%s, value=%s, old_value=%s, merging_value=%s, event_type=%s, uuid=%s, " + "number_of_affected_entries=%s)" + % ( + self.key, + self.value, + self.old_value, + self.merging_value, + get_attr_name(EntryEventType, self.event_type), + self.uuid, + self.number_of_affected_entries, + ) + ) + + +class TopicMessage(typing.Generic[MessageType]): + """Topic message. + + Attributes: + name: Name of the proxy that fired the event. + message: The message sent to Topic. + publish_time: UNIX time that the event is published as seconds. + member: Member that fired the event. 
+ """ + + __slots__ = ("name", "message", "publish_time", "member") + + def __init__(self, name: str, message: MessageType, publish_time: int, member: MemberInfo): + self.name = name + self.message = message + self.publish_time = publish_time + self.member = member + + def __repr__(self): + return "TopicMessage(message=%s, publish_time=%s, topic_name=%s, publishing_member=%s)" % ( + self.message, + self.publish_time, + self.name, + self.member, + ) + + +def get_entry_listener_flags(**kwargs): + flags = 0 + for key, value in kwargs.items(): + if value: + flags |= getattr(EntryEventType, key) + return flags diff --git a/hazelcast/internal/asyncio_proxy/manager.py b/hazelcast/internal/asyncio_proxy/manager.py new file mode 100644 index 0000000000..23356ed6d0 --- /dev/null +++ b/hazelcast/internal/asyncio_proxy/manager.py @@ -0,0 +1,89 @@ +import typing + +from hazelcast.protocol.codec import client_create_proxy_codec, client_destroy_proxy_codec +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.internal.asyncio_proxy.base import Proxy +# from hazelcast.proxy.executor import Executor +# from hazelcast.proxy.list import List +from hazelcast.internal.asyncio_proxy.map import create_map_proxy +# from hazelcast.proxy.multi_map import MultiMap +# from hazelcast.proxy.queue import Queue +# from hazelcast.proxy.reliable_topic import ReliableTopic +# from hazelcast.proxy.replicated_map import ReplicatedMap +# from hazelcast.proxy.ringbuffer import Ringbuffer +# from hazelcast.proxy.set import Set +# from hazelcast.proxy.topic import Topic +# from hazelcast.proxy.pn_counter import PNCounter +# from hazelcast.proxy.flake_id_generator import FlakeIdGenerator +# from hazelcast.proxy.vector_collection import VectorCollection +from hazelcast.util import to_list + +# EXECUTOR_SERVICE = "hz:impl:executorService" +# LIST_SERVICE = "hz:impl:listService" +# MULTI_MAP_SERVICE = "hz:impl:multiMapService" +MAP_SERVICE = "hz:impl:mapService" +# RELIABLE_TOPIC_SERVICE = "hz:impl:reliableTopicService" +# REPLICATED_MAP_SERVICE = "hz:impl:replicatedMapService" +# RINGBUFFER_SERVICE = "hz:impl:ringbufferService" +# SET_SERVICE = "hz:impl:setService" +# QUEUE_SERVICE = "hz:impl:queueService" +# TOPIC_SERVICE = "hz:impl:topicService" +# PN_COUNTER_SERVICE = "hz:impl:PNCounterService" +# FLAKE_ID_GENERATOR_SERVICE = "hz:impl:flakeIdGeneratorService" +# VECTOR_SERVICE = "hz:service:vector" + +_proxy_init: typing.Dict[str, typing.Callable[[str, str, typing.Any], Proxy]] = { + # EXECUTOR_SERVICE: Executor, + # LIST_SERVICE: List, + MAP_SERVICE: create_map_proxy, + # MULTI_MAP_SERVICE: MultiMap, + # QUEUE_SERVICE: Queue, + # RELIABLE_TOPIC_SERVICE: ReliableTopic, + # REPLICATED_MAP_SERVICE: ReplicatedMap, + # RINGBUFFER_SERVICE: Ringbuffer, + # SET_SERVICE: Set, + # TOPIC_SERVICE: Topic, + # PN_COUNTER_SERVICE: PNCounter, + # FLAKE_ID_GENERATOR_SERVICE: FlakeIdGenerator, + # VECTOR_SERVICE: VectorCollection, +} + + +class ProxyManager: + def __init__(self, context): + self._context = context + self._proxies = {} + + async def get_or_create(self, service_name, name, create_on_remote=True): + ns = (service_name, name) + if ns in self._proxies: + return self._proxies[ns] + + proxy = await self._create_proxy(service_name, name, create_on_remote) + self._proxies[ns] = proxy + return proxy + + async def _create_proxy(self, service_name, name, create_on_remote) -> Proxy: + if create_on_remote: + request = client_create_proxy_codec.encode_request(name, service_name) + invocation = Invocation(request) + 
invocation_service = self._context.invocation_service + await invocation_service.ainvoke(invocation) + + return _proxy_init[service_name](service_name, name, self._context) + + async def destroy_proxy(self, service_name, name, destroy_on_remote=True): + ns = (service_name, name) + try: + self._proxies.pop(ns) + if destroy_on_remote: + request = client_destroy_proxy_codec.encode_request(name, service_name) + invocation = Invocation(request) + invocation_service = self._context.invocation_service + await invocation_service.ainvoke(invocation) + return True + except KeyError: + return False + + def get_distributed_objects(self): + return to_list(self._proxies.values()) diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py new file mode 100644 index 0000000000..e3e4762413 --- /dev/null +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -0,0 +1,967 @@ +import asyncio +import itertools +import typing + +from hazelcast.aggregator import Aggregator +from hazelcast.config import IndexUtil, IndexType, IndexConfig +from hazelcast.core import SimpleEntryView +from hazelcast.errors import InvalidConfigurationError +from hazelcast.projection import Projection +from hazelcast.protocol import PagingPredicateHolder +from hazelcast.protocol.codec import ( + map_add_entry_listener_codec, + map_add_entry_listener_to_key_codec, + map_add_entry_listener_with_predicate_codec, + map_add_entry_listener_to_key_with_predicate_codec, + map_clear_codec, + map_contains_key_codec, + map_contains_value_codec, + map_delete_codec, + map_entry_set_codec, + map_entries_with_predicate_codec, + map_evict_codec, + map_evict_all_codec, + map_flush_codec, + map_get_codec, + map_get_all_codec, + map_get_entry_view_codec, + map_is_empty_codec, + map_key_set_codec, + map_key_set_with_predicate_codec, + map_load_all_codec, + map_load_given_keys_codec, + map_put_codec, + map_put_all_codec, + map_put_if_absent_codec, + map_put_transient_codec, + map_size_codec, + map_remove_codec, + map_remove_if_same_codec, + map_remove_entry_listener_codec, + map_replace_codec, + map_replace_if_same_codec, + map_set_codec, + map_try_put_codec, + map_try_remove_codec, + map_values_codec, + map_values_with_predicate_codec, + map_add_interceptor_codec, + map_aggregate_codec, + map_aggregate_with_predicate_codec, + map_project_codec, + map_project_with_predicate_codec, + map_execute_on_all_keys_codec, + map_execute_on_key_codec, + map_execute_on_keys_codec, + map_execute_with_predicate_codec, + map_add_index_codec, + map_set_ttl_codec, + map_entries_with_paging_predicate_codec, + map_key_set_with_paging_predicate_codec, + map_values_with_paging_predicate_codec, + map_put_with_max_idle_codec, + map_put_if_absent_with_max_idle_codec, + map_put_transient_with_max_idle_codec, + map_set_with_max_idle_codec, + map_remove_interceptor_codec, + map_remove_all_codec, +) +from hazelcast.internal.asyncio_proxy.base import ( + Proxy, + EntryEvent, + EntryEventType, + get_entry_listener_flags, +) +from hazelcast.predicate import Predicate, _PagingPredicate +from hazelcast.serialization.data import Data +from hazelcast.types import AggregatorResultType, KeyType, ValueType, ProjectionType +from hazelcast.serialization.compact import SchemaNotReplicatedError +from hazelcast.util import ( + check_not_none, + thread_id, + to_millis, + IterationType, + deserialize_entry_list_in_place, + deserialize_list_in_place, +) + + +EntryEventCallable = typing.Callable[[EntryEvent[KeyType, ValueType]], None] + + +class Map(Proxy, 
typing.Generic[KeyType, ValueType]): + + def __init__(self, service_name, name, context): + super(Map, self).__init__(service_name, name, context) + self._reference_id_generator = context.lock_reference_id_generator + + async def add_entry_listener( + self, + include_value: bool = False, + key: KeyType = None, + predicate: Predicate = None, + added_func: EntryEventCallable = None, + removed_func: EntryEventCallable = None, + updated_func: EntryEventCallable = None, + evicted_func: EntryEventCallable = None, + evict_all_func: EntryEventCallable = None, + clear_all_func: EntryEventCallable = None, + merged_func: EntryEventCallable = None, + expired_func: EntryEventCallable = None, + loaded_func: EntryEventCallable = None, + ) -> str: + flags = get_entry_listener_flags( + ADDED=added_func, + REMOVED=removed_func, + UPDATED=updated_func, + EVICTED=evicted_func, + EXPIRED=expired_func, + EVICT_ALL=evict_all_func, + CLEAR_ALL=clear_all_func, + MERGED=merged_func, + LOADED=loaded_func, + ) + if key is not None and predicate is not None: + try: + key_data = self._to_data(key) + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry( + e, + self.add_entry_listener, + include_value, + key, + predicate, + added_func, + removed_func, + updated_func, + evicted_func, + evict_all_func, + clear_all_func, + merged_func, + expired_func, + loaded_func, + ) + with_key_and_predicate_codec = map_add_entry_listener_to_key_with_predicate_codec + request = with_key_and_predicate_codec.encode_request( + self.name, key_data, predicate_data, include_value, flags, self._is_smart + ) + response_decoder = with_key_and_predicate_codec.decode_response + event_message_handler = with_key_and_predicate_codec.handle + elif key is not None and predicate is None: + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry( + e, + self.add_entry_listener, + include_value, + key, + predicate, + added_func, + removed_func, + updated_func, + evicted_func, + evict_all_func, + clear_all_func, + merged_func, + expired_func, + loaded_func, + ) + + with_key_codec = map_add_entry_listener_to_key_codec + request = with_key_codec.encode_request( + self.name, key_data, include_value, flags, self._is_smart + ) + response_decoder = with_key_codec.decode_response + event_message_handler = with_key_codec.handle + elif key is None and predicate is not None: + try: + predicate = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry( + e, + self.add_entry_listener, + include_value, + key, + predicate, + added_func, + removed_func, + updated_func, + evicted_func, + evict_all_func, + clear_all_func, + merged_func, + expired_func, + loaded_func, + ) + with_predicate_codec = map_add_entry_listener_with_predicate_codec + request = with_predicate_codec.encode_request( + self.name, predicate, include_value, flags, self._is_smart + ) + response_decoder = with_predicate_codec.decode_response + event_message_handler = with_predicate_codec.handle + else: + codec = map_add_entry_listener_codec + request = codec.encode_request(self.name, include_value, flags, self._is_smart) + response_decoder = codec.decode_response + event_message_handler = codec.handle + + def handle_event_entry( + key_data, + value_data, + old_value_data, + merging_value_data, + event_type, + uuid, + number_of_affected_entries, + ): + event = EntryEvent( + self._to_object(key_data), + 
self._to_object(value_data), + self._to_object(old_value_data), + self._to_object(merging_value_data), + event_type, + uuid, + number_of_affected_entries, + ) + if event.event_type == EntryEventType.ADDED: + added_func(event) + elif event.event_type == EntryEventType.REMOVED: + removed_func(event) + elif event.event_type == EntryEventType.UPDATED: + updated_func(event) + elif event.event_type == EntryEventType.EVICTED: + evicted_func(event) + elif event.event_type == EntryEventType.EVICT_ALL: + evict_all_func(event) + elif event.event_type == EntryEventType.CLEAR_ALL: + clear_all_func(event) + elif event.event_type == EntryEventType.MERGED: + merged_func(event) + elif event.event_type == EntryEventType.EXPIRED: + expired_func(event) + elif event.event_type == EntryEventType.LOADED: + loaded_func(event) + return await self._register_listener( + request, + lambda r: response_decoder(r), + lambda reg_id: map_remove_entry_listener_codec.encode_request(self.name, reg_id), + lambda m: event_message_handler(m, handle_event_entry), + ) + + async def add_index( + self, + attributes: typing.Sequence[str] = None, + index_type: typing.Union[int, str] = IndexType.SORTED, + name: str = None, + bitmap_index_options: typing.Dict[str, typing.Any] = None, + ) -> None: + d = { + "name": name, + "type": index_type, + "attributes": attributes, + "bitmap_index_options": bitmap_index_options, + } + config = IndexConfig.from_dict(d) + validated = IndexUtil.validate_and_normalize(self.name, config) + request = map_add_index_codec.encode_request(self.name, validated) + return await self._invoke(request) + + async def add_interceptor(self, interceptor: typing.Any) -> str: + try: + interceptor_data = self._to_data(interceptor) + except SchemaNotReplicatedError as e: + return self._send_schema_and_retry(e, self.add_interceptor, interceptor) + + request = map_add_interceptor_codec.encode_request(self.name, interceptor_data) + return await self._invoke(request, map_add_interceptor_codec.decode_response) + + async def aggregate( + self, aggregator: Aggregator[AggregatorResultType], predicate: Predicate = None + ) -> AggregatorResultType: + check_not_none(aggregator, "aggregator can't be none") + if predicate: + if isinstance(predicate, _PagingPredicate): + raise AssertionError("Paging predicate is not supported.") + + try: + aggregator_data = self._to_data(aggregator) + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.aggregate, aggregator, predicate) + + def handler(message): + return self._to_object(map_aggregate_with_predicate_codec.decode_response(message)) + + request = map_aggregate_with_predicate_codec.encode_request( + self.name, aggregator_data, predicate_data + ) + else: + try: + aggregator_data = self._to_data(aggregator) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.aggregate, aggregator, predicate) + + def handler(message): + return self._to_object(map_aggregate_codec.decode_response(message)) + + request = map_aggregate_codec.encode_request(self.name, aggregator_data) + + return await self._invoke(request, handler) + + async def clear(self) -> None: + request = map_clear_codec.encode_request(self.name) + return await self._invoke(request) + + async def contains_key(self, key: KeyType) -> bool: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.contains_key, 
key) + + return await self._contains_key_internal(key_data) + + async def contains_value(self, value: ValueType) -> bool: + check_not_none(value, "value can't be None") + try: + value_data = self._to_data(value) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.contains_value, value) + request = map_contains_value_codec.encode_request(self.name, value_data) + return await self._invoke(request, map_contains_value_codec.decode_response) + + async def delete(self, key: KeyType) -> None: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.delete, key) + return await self._delete_internal(key_data) + + async def entry_set( + self, predicate: Predicate = None + ) -> typing.List[typing.Tuple[KeyType, ValueType]]: + if predicate: + if isinstance(predicate, _PagingPredicate): + predicate.iteration_type = IterationType.ENTRY + try: + holder = PagingPredicateHolder.of(predicate, self._to_data) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.entry_set, predicate) + + def handler(message): + response = map_entries_with_paging_predicate_codec.decode_response(message) + predicate.anchor_list = response["anchor_data_list"].as_anchor_list( + self._to_object + ) + entry_data_list = response["response"] + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + request = map_entries_with_paging_predicate_codec.encode_request(self.name, holder) + else: + try: + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.entry_set, predicate) + + def handler(message): + entry_data_list = map_entries_with_predicate_codec.decode_response(message) + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + request = map_entries_with_predicate_codec.encode_request(self.name, predicate_data) + else: + def handler(message): + entry_data_list = map_entry_set_codec.decode_response(message) + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + request = map_entry_set_codec.encode_request(self.name) + + return await self._invoke(request, handler) + + async def evict(self, key: KeyType) -> bool: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.evict, key) + + return await self._evict_internal(key_data) + + async def evict_all(self) -> None: + request = map_evict_all_codec.encode_request(self.name) + return await self._invoke(request) + + async def execute_on_entries( + self, entry_processor: typing.Any, predicate: Predicate = None + ) -> typing.List[typing.Any]: + if predicate: + try: + entry_processor_data = self._to_data(entry_processor) + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry( + e, self.execute_on_entries, entry_processor, predicate + ) + + def handler(message): + entry_data_list = map_execute_with_predicate_codec.decode_response(message) + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + request = map_execute_with_predicate_codec.encode_request( + self.name, entry_processor_data, predicate_data + ) + else: + try: + entry_processor_data = self._to_data(entry_processor) + except SchemaNotReplicatedError as e: + return await 
self._send_schema_and_retry( + e, self.execute_on_entries, entry_processor, predicate + ) + + def handler(message): + entry_data_list = map_execute_on_all_keys_codec.decode_response(message) + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + request = map_execute_on_all_keys_codec.encode_request(self.name, entry_processor_data) + + return await self._invoke(request, handler) + + async def execute_on_key(self, key: KeyType, entry_processor: typing.Any) -> typing.Any: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + entry_processor_data = self._to_data(entry_processor) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.execute_on_key, key, entry_processor) + + return await self._execute_on_key_internal(key_data, entry_processor_data) + + async def execute_on_keys( + self, keys: typing.Sequence[KeyType], entry_processor: typing.Any + ) -> typing.List[typing.Any]: + if len(keys) == 0: + return [] + try: + key_list = [] + for key in keys: + check_not_none(key, "key can't be None") + key_list.append(self._to_data(key)) + + entry_processor_data = self._to_data(entry_processor) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.execute_on_keys, keys, entry_processor) + + def handler(message): + entry_data_list = map_execute_on_keys_codec.decode_response(message) + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + request = map_execute_on_keys_codec.encode_request( + self.name, entry_processor_data, key_list + ) + return await self._invoke(request, handler) + + async def flush(self) -> None: + request = map_flush_codec.encode_request(self.name) + return await self._invoke(request) + + async def get(self, key: KeyType) -> typing.Optional[ValueType]: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.get, key) + return await self._get_internal(key_data) + + async def get_all(self, keys: typing.Sequence[KeyType]) -> typing.Dict[KeyType, ValueType]: + check_not_none(keys, "keys can't be None") + if not keys: + return {} + partition_service = self._context.partition_service + partition_to_keys: typing.Dict[int, typing.Dict[KeyType, Data]] = {} + for key in keys: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.get_all, keys) + partition_id = partition_service.get_partition_id(key_data) + try: + partition_to_keys[partition_id][key] = key_data + except KeyError: + partition_to_keys[partition_id] = {key: key_data} + + return await self._get_all_internal(partition_to_keys) + + async def get_entry_view(self, key: KeyType) -> SimpleEntryView[KeyType, ValueType]: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.get_entry_view, key) + + def handler(message): + response = map_get_entry_view_codec.decode_response(message) + entry_view = response["response"] + if not entry_view: + return None + entry_view.key = self._to_object(entry_view.key) + entry_view.value = self._to_object(entry_view.value) + return entry_view + + request = map_get_entry_view_codec.encode_request(self.name, key_data, thread_id()) + return await self._invoke_on_key(request, key_data, handler) + + async def 
is_empty(self) -> bool: + request = map_is_empty_codec.encode_request(self.name) + return await self._invoke(request, map_is_empty_codec.decode_response) + + async def key_set(self, predicate: Predicate = None) -> typing.List[ValueType]: + if predicate: + if isinstance(predicate, _PagingPredicate): + predicate.iteration_type = IterationType.KEY + + try: + holder = PagingPredicateHolder.of(predicate, self._to_data) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.key_set, predicate) + + def handler(message): + response = map_key_set_with_paging_predicate_codec.decode_response(message) + predicate.anchor_list = response["anchor_data_list"].as_anchor_list( + self._to_object + ) + data_list = response["response"] + return deserialize_list_in_place(data_list, self._to_object) + + request = map_key_set_with_paging_predicate_codec.encode_request(self.name, holder) + else: + try: + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.key_set, predicate) + + def handler(message): + data_list = map_key_set_with_predicate_codec.decode_response(message) + return deserialize_list_in_place(data_list, self._to_object) + + request = map_key_set_with_predicate_codec.encode_request(self.name, predicate_data) + else: + def handler(message): + data_list = map_key_set_codec.decode_response(message) + return deserialize_list_in_place(data_list, self._to_object) + + request = map_key_set_codec.encode_request(self.name) + + return await self._invoke(request, handler) + + async def load_all( + self, keys: typing.Sequence[KeyType] = None, replace_existing_values: bool = True + ) -> None: + if keys: + try: + key_data_list = [self._to_data(key) for key in keys] + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.load_all, keys, replace_existing_values) + + return await self._load_all_internal(key_data_list, replace_existing_values) + + request = map_load_all_codec.encode_request(self.name, replace_existing_values) + return await self._invoke(request) + + async def project( + self, projection: Projection[ProjectionType], predicate: Predicate = None + ) -> ProjectionType: + check_not_none(projection, "Projection can't be none") + if predicate: + if isinstance(predicate, _PagingPredicate): + raise AssertionError("Paging predicate is not supported.") + try: + projection_data = self._to_data(projection) + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.project, projection, predicate) + + def handler(message): + data_list = map_project_with_predicate_codec.decode_response(message) + return deserialize_list_in_place(data_list, self._to_object) + + request = map_project_with_predicate_codec.encode_request( + self.name, projection_data, predicate_data + ) + else: + try: + projection_data = self._to_data(projection) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.project, projection, predicate) + + def handler(message): + data_list = map_project_codec.decode_response(message) + return deserialize_list_in_place(data_list, self._to_object) + + request = map_project_codec.encode_request(self.name, projection_data) + + return await self._invoke(request, handler) + + async def put( + self, key: KeyType, value: ValueType, ttl: float = None, max_idle: float = None + ) -> typing.Optional[ValueType]: + check_not_none(key, "key can't be None") + 
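# Serializing the key and value below may raise SchemaNotReplicatedError for
+        # Compact types whose schema is not yet known to the cluster; in that case the
+        # call is retried through self._send_schema_and_retry (presumably after the
+        # schema is replicated), mirroring the other public methods of this proxy.
+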
check_not_none(value, "value can't be None") + try: + key_data = self._to_data(key) + value_data = self._to_data(value) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.put, key, value, ttl, max_idle) + + return await self._put_internal(key_data, value_data, ttl, max_idle) + + async def put_all(self, map: typing.Dict[KeyType, ValueType]) -> None: + check_not_none(map, "map can't be None") + if not map: + return None + partition_service = self._context.partition_service + partition_map: typing.Dict[int, typing.List[typing.Tuple[Data, Data]]] = {} + for key, value in map.items(): + check_not_none(key, "key can't be None") + check_not_none(value, "value can't be None") + try: + entry = (self._to_data(key), self._to_data(value)) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.put_all, map) + partition_id = partition_service.get_partition_id(entry[0]) + try: + partition_map[partition_id].append(entry) + except KeyError: + partition_map[partition_id] = [entry] + + async with asyncio.TaskGroup() as tg: + for partition_id, entry_list in partition_map.items(): + request = map_put_all_codec.encode_request( + self.name, entry_list, False + ) # TODO trigger map loader + tg.create_task(self._ainvoke_on_partition(request, partition_id)) + return None + + async def put_if_absent( + self, key: KeyType, value: ValueType, ttl: float = None, max_idle: float = None + ) -> typing.Optional[ValueType]: + check_not_none(key, "key can't be None") + check_not_none(value, "value can't be None") + try: + key_data = self._to_data(key) + value_data = self._to_data(value) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.put_if_absent, key, value, ttl, max_idle) + + return await self._put_if_absent_internal(key_data, value_data, ttl, max_idle) + + async def put_transient( + self, key: KeyType, value: ValueType, ttl: float = None, max_idle: float = None + ) -> None: + check_not_none(key, "key can't be None") + check_not_none(value, "value can't be None") + try: + key_data = self._to_data(key) + value_data = self._to_data(value) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.put_transient, key, value, ttl, max_idle) + + return await self._put_transient_internal(key_data, value_data, ttl, max_idle) + + async def remove(self, key: KeyType) -> typing.Optional[ValueType]: + check_not_none(key, "key can't be None") + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.remove, key) + + return await self._remove_internal(key_data) + + async def remove_all(self, predicate: Predicate) -> None: + check_not_none(predicate, "predicate can't be None") + try: + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.remove_all, predicate) + + return await self._remove_all_internal(predicate_data) + + async def remove_if_same(self, key: KeyType, value: ValueType) -> bool: + check_not_none(key, "key can't be None") + check_not_none(value, "value can't be None") + try: + key_data = self._to_data(key) + value_data = self._to_data(value) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.remove_if_same, key, value) + return await self._remove_if_same_internal_(key_data, value_data) + + async def remove_entry_listener(self, registration_id: str) -> bool: + return await 
self._deregister_listener(registration_id)
+
+    async def remove_interceptor(self, registration_id: str) -> bool:
+        check_not_none(registration_id, "Interceptor registration id should not be None")
+        request = map_remove_interceptor_codec.encode_request(self.name, registration_id)
+        return await self._invoke(request, map_remove_interceptor_codec.decode_response)
+
+    async def replace(self, key: KeyType, value: ValueType) -> typing.Optional[ValueType]:
+        check_not_none(key, "key can't be None")
+        check_not_none(value, "value can't be None")
+        try:
+            key_data = self._to_data(key)
+            value_data = self._to_data(value)
+        except SchemaNotReplicatedError as e:
+            return await self._send_schema_and_retry(e, self.replace, key, value)
+        return await self._replace_internal(key_data, value_data)
+
+    async def replace_if_same(
+        self, key: KeyType, old_value: ValueType, new_value: ValueType
+    ) -> bool:
+        check_not_none(key, "key can't be None")
+        check_not_none(old_value, "old_value can't be None")
+        check_not_none(new_value, "new_value can't be None")
+        try:
+            key_data = self._to_data(key)
+            old_value_data = self._to_data(old_value)
+            new_value_data = self._to_data(new_value)
+        except SchemaNotReplicatedError as e:
+            return await self._send_schema_and_retry(e, self.replace_if_same, key, old_value, new_value)
+
+        return await self._replace_if_same_internal(key_data, old_value_data, new_value_data)
+
+    async def set(
+        self, key: KeyType, value: ValueType, ttl: float = None, max_idle: float = None
+    ) -> None:
+        check_not_none(key, "key can't be None")
+        check_not_none(value, "value can't be None")
+        try:
+            key_data = self._to_data(key)
+            value_data = self._to_data(value)
+        except SchemaNotReplicatedError as e:
+            return await self._send_schema_and_retry(e, self.set, key, value, ttl, max_idle)
+        return await self._set_internal(key_data, value_data, ttl, max_idle)
+
+    async def set_ttl(self, key: KeyType, ttl: float) -> None:
+        check_not_none(key, "key can't be None")
+        check_not_none(ttl, "ttl can't be None")
+        try:
+            key_data = self._to_data(key)
+        except SchemaNotReplicatedError as e:
+            return await self._send_schema_and_retry(e, self.set_ttl, key, ttl)
+        return await self._set_ttl_internal(key_data, ttl)
+
+    async def size(self) -> int:
+        request = map_size_codec.encode_request(self.name)
+        return await self._invoke(request, map_size_codec.decode_response)
+
+    async def try_put(self, key: KeyType, value: ValueType, timeout: float = 0) -> bool:
+        check_not_none(key, "key can't be None")
+        check_not_none(value, "value can't be None")
+        try:
+            key_data = self._to_data(key)
+            value_data = self._to_data(value)
+        except SchemaNotReplicatedError as e:
+            return await self._send_schema_and_retry(e, self.try_put, key, value, timeout)
+        return await self._try_put_internal(key_data, value_data, timeout)
+
+    async def try_remove(self, key: KeyType, timeout: float = 0) -> bool:
+        check_not_none(key, "key can't be None")
+        try:
+            key_data = self._to_data(key)
+        except SchemaNotReplicatedError as e:
+            return await self._send_schema_and_retry(e, self.try_remove, key, timeout)
+        return await self._try_remove_internal(key_data, timeout)
+
+    async def values(self, predicate: Predicate = None) -> typing.List[ValueType]:
+        if predicate:
+            if isinstance(predicate, _PagingPredicate):
+                predicate.iteration_type = IterationType.VALUE
+
+            try:
+                holder = PagingPredicateHolder.of(predicate, self._to_data)
+            except SchemaNotReplicatedError as e:
+                return await self._send_schema_and_retry(e, self.values, predicate)
+
+            def 
handler(message): + response = map_values_with_paging_predicate_codec.decode_response(message) + predicate.anchor_list = response["anchor_data_list"].as_anchor_list( + self._to_object + ) + data_list = response["response"] + return deserialize_list_in_place(data_list, self._to_object) + + request = map_values_with_paging_predicate_codec.encode_request(self.name, holder) + else: + try: + predicate_data = self._to_data(predicate) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.values, predicate) + + def handler(message): + data_list = map_values_with_predicate_codec.decode_response(message) + return deserialize_list_in_place(data_list, self._to_object) + + request = map_values_with_predicate_codec.encode_request(self.name, predicate_data) + else: + def handler(message): + data_list = map_values_codec.decode_response(message) + return deserialize_list_in_place(data_list, self._to_object) + + request = map_values_codec.encode_request(self.name) + + return await self._invoke(request, handler) + + def _contains_key_internal(self, key_data): + request = map_contains_key_codec.encode_request(self.name, key_data, thread_id()) + return self._invoke_on_key(request, key_data, map_contains_key_codec.decode_response) + + def _get_internal(self, key_data): + def handler(message): + return self._to_object(map_get_codec.decode_response(message)) + + request = map_get_codec.encode_request(self.name, key_data, thread_id()) + return self._invoke_on_key(request, key_data, handler) + + async def _get_all_internal(self, partition_to_keys, tasks=None): + def handler(message): + entry_data_list = map_get_all_codec.decode_response(message) + return deserialize_entry_list_in_place(entry_data_list, self._to_object) + + tasks = tasks or [] + async with asyncio.TaskGroup() as tg: + for partition_id, key_dict in partition_to_keys.items(): + request = map_get_all_codec.encode_request(self.name, key_dict.values()) + task = tg.create_task(self._ainvoke_on_partition(request, partition_id, handler)) + tasks.append(task) + kvs = itertools.chain.from_iterable(task.result() for task in tasks) + return dict(kvs) + + def _remove_internal(self, key_data): + def handler(message): + return self._to_object(map_remove_codec.decode_response(message)) + + request = map_remove_codec.encode_request(self.name, key_data, thread_id()) + return self._invoke_on_key(request, key_data, handler) + + def _remove_all_internal(self, predicate_data): + request = map_remove_all_codec.encode_request(self.name, predicate_data) + return self._invoke(request) + + def _remove_if_same_internal_(self, key_data, value_data): + request = map_remove_if_same_codec.encode_request( + self.name, key_data, value_data, thread_id() + ) + return self._invoke_on_key( + request, key_data, response_handler=map_remove_if_same_codec.decode_response + ) + + def _delete_internal(self, key_data): + request = map_delete_codec.encode_request(self.name, key_data, thread_id()) + return self._invoke_on_key(request, key_data) + + def _put_internal(self, key_data, value_data, ttl, max_idle): + def handler(message): + return self._to_object(map_put_codec.decode_response(message)) + + if max_idle is not None: + request = map_put_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) + else: + request = map_put_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) + return self._invoke_on_key(request, key_data, handler) + + def 
_set_internal(self, key_data, value_data, ttl, max_idle): + if max_idle is not None: + request = map_set_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) + else: + request = map_set_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) + return self._invoke_on_key(request, key_data) + + def _set_ttl_internal(self, key_data, ttl): + request = map_set_ttl_codec.encode_request(self.name, key_data, to_millis(ttl)) + return self._invoke_on_key(request, key_data, map_set_ttl_codec.decode_response) + + def _try_remove_internal(self, key_data, timeout): + request = map_try_remove_codec.encode_request( + self.name, key_data, thread_id(), to_millis(timeout) + ) + return self._invoke_on_key(request, key_data, map_try_remove_codec.decode_response) + + def _try_put_internal(self, key_data, value_data, timeout): + request = map_try_put_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(timeout) + ) + return self._invoke_on_key(request, key_data, map_try_put_codec.decode_response) + + def _put_transient_internal(self, key_data, value_data, ttl, max_idle): + if max_idle is not None: + request = map_put_transient_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) + else: + request = map_put_transient_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) + return self._invoke_on_key(request, key_data) + + def _put_if_absent_internal(self, key_data, value_data, ttl, max_idle): + def handler(message): + return self._to_object(map_put_if_absent_codec.decode_response(message)) + + if max_idle is not None: + request = map_put_if_absent_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) + else: + request = map_put_if_absent_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) + return self._invoke_on_key(request, key_data, handler) + + def _replace_if_same_internal(self, key_data, old_value_data, new_value_data): + request = map_replace_if_same_codec.encode_request( + self.name, key_data, old_value_data, new_value_data, thread_id() + ) + return self._invoke_on_key(request, key_data, map_replace_if_same_codec.decode_response) + + def _replace_internal(self, key_data, value_data): + def handler(message): + return self._to_object(map_replace_codec.decode_response(message)) + + request = map_replace_codec.encode_request(self.name, key_data, value_data, thread_id()) + return self._invoke_on_key(request, key_data, handler) + + def _evict_internal(self, key_data): + request = map_evict_codec.encode_request(self.name, key_data, thread_id()) + return self._invoke_on_key(request, key_data, map_evict_codec.decode_response) + + def _load_all_internal(self, key_data_list, replace_existing_values): + request = map_load_given_keys_codec.encode_request( + self.name, key_data_list, replace_existing_values + ) + return self._invoke(request) + + def _execute_on_key_internal(self, key_data, entry_processor_data): + def handler(message): + return self._to_object(map_execute_on_key_codec.decode_response(message)) + + request = map_execute_on_key_codec.encode_request( + self.name, entry_processor_data, key_data, thread_id() + ) + return self._invoke_on_key(request, key_data, handler) + + +def create_map_proxy(service_name, name, context): + near_cache_config = 
context.config.near_caches.get(name, None) + if near_cache_config is None: + return Map(service_name, name, context) + raise InvalidConfigurationError("near cache is not supported") diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py new file mode 100644 index 0000000000..b3082985f5 --- /dev/null +++ b/hazelcast/internal/asyncio_reactor.py @@ -0,0 +1,172 @@ +import asyncio +import io +import logging +import time +from asyncio import AbstractEventLoop, transports + +from hazelcast.internal.asyncio_connection import Connection +from hazelcast.core import Address + +_BUFFER_SIZE = 128000 + + +_logger = logging.getLogger(__name__) + + +class AsyncioReactor: + + def __init__(self, loop: AbstractEventLoop | None = None): + self._is_live = False + self._loop = loop or asyncio.get_running_loop() + self._bytes_lock = asyncio.Lock() + self._bytes_sent = 0 + self._bytes_received = 0 + + def add_timer(self, delay, callback): + return self._loop.call_later(delay, callback) + + def start(self): + self._is_live = True + + def shutdown(self): + if not self._is_live: + return + # TODO: cancel tasks + + async def connection_factory( + self, connection_manager, connection_id, address: Address, network_config, message_callback + ): + return await AsyncioConnection.create_and_connect( + self._loop, self, connection_manager, connection_id, address, network_config, message_callback, + ) + + def update_bytes_sent(self, sent: int): + # with self._bytes_lock: + self._bytes_sent += sent + + def update_bytes_received(self, received: int): + # with self._bytes_lock: + self._bytes_received += received + + # def _asyncio_loop(self): + # asyncio.set_event_loop(self._loop) + # self._loop.run_forever() + + +class AsyncioConnection(Connection): + + def __init__(self, loop, reactor: AsyncioReactor, connection_manager, connection_id, address, config, message_callback): + super().__init__(connection_manager, connection_id, message_callback) + self._loop = loop + self._reactor = reactor + self._address = address + self._config = config + self._proto = None + + @classmethod + async def create_and_connect(cls, loop, reactor: AsyncioReactor, connection_manager, connection_id, address, config, message_callback): + this = cls(loop, reactor, connection_manager, connection_id, address, config, message_callback) + if this._config.ssl_enabled: + await this._create_ssl_connection() + else: + await this._create_connection() + return this + + def _create_protocol(self): + return HazelcastProtocol(self._loop, self._reader, self._address, self._update_read_time, + self._update_write_time, self._update_sent, self._update_received) + + async def _create_connection(self): + loop = self._loop + res = await loop.create_connection(self._create_protocol, host=self._address.host, port=self._address.port) + _sock, self._proto = res + + async def _create_ssl_connection(self): + raise NotImplementedError + + def _write(self, buf): + self._proto.write(buf) + + def _inner_close(self): + self._proto.close() + + def _update_read_time(self, time): + self.last_read_time = time + + def _update_write_time(self, time): + self.last_write_time = time + + def _update_sent(self, sent): + self._reactor.update_bytes_sent(sent) + + def _update_received(self, received): + self._reactor.update_bytes_received(received) + + +class HazelcastProtocol(asyncio.BufferedProtocol): + + PROTOCOL_STARTER = b"CP2" + + def __init__(self, loop: AbstractEventLoop, reader, address, update_read_time, update_write_time, update_sent, 
update_received): + self._loop = loop + self._reader = reader + self._address = address + self._update_read_time = update_read_time + self._update_write_time = update_write_time + self._update_sent = update_sent + self._update_received = update_received + self._transport = None + self.start_time = None + self._write_buf = io.BytesIO() + self._write_buf_size = 0 + self._recv_buf = None + self._alive = True + + def connection_made(self, transport: transports.Transport): + self._transport = transport + self.start_time = time.time() + self.write(self.PROTOCOL_STARTER) + _logger.debug("Connected to %s", self._address) + self._loop.call_soon(self._write_loop) + + def connection_lost(self, exc): + self._alive = False + return False + + def close(self): + self._transport.close() + + def write(self, buf): + self._write_buf.write(buf) + self._write_buf_size += len(buf) + + def get_buffer(self, sizehint): + if self._recv_buf is None: + buf_size = max(sizehint, _BUFFER_SIZE) + self._recv_buf = memoryview(bytearray(buf_size)) + return self._recv_buf + + def buffer_updated(self, nbytes): + recv_bytes = self._recv_buf[:nbytes] + self._update_read_time(time.time()) + self._update_received(nbytes) + self._reader.read(recv_bytes) + if self._reader.length: + self._reader.process() + + def eof_received(self): + self._alive = False + + def _do_write(self): + if not self._write_buf_size: + return + buf_bytes = self._write_buf.getvalue() + self._transport.write(buf_bytes[:self._write_buf_size]) + self._update_write_time(time.time()) + self._update_sent(self._write_buf_size) + self._write_buf.seek(0) + self._write_buf_size = 0 + + def _write_loop(self): + self._do_write() + return self._loop.call_later(0.01, self._write_loop) diff --git a/hazelcast/proxy/__init__.py b/hazelcast/proxy/__init__.py index 1e6d98abe2..0b94d82fc2 100644 --- a/hazelcast/proxy/__init__.py +++ b/hazelcast/proxy/__init__.py @@ -54,26 +54,23 @@ def __init__(self, context): self._context = context self._proxies = {} - def get_or_create(self, service_name, name, create_on_remote=True): + async def get_or_create(self, service_name, name, create_on_remote=True): ns = (service_name, name) if ns in self._proxies: return self._proxies[ns] - - proxy = self._create_proxy(service_name, name, create_on_remote) + proxy = await self._create_proxy(service_name, name, create_on_remote) self._proxies[ns] = proxy return proxy - def _create_proxy(self, service_name, name, create_on_remote): + async def _create_proxy(self, service_name, name, create_on_remote): if create_on_remote: request = client_create_proxy_codec.encode_request(name, service_name) invocation = Invocation(request) invocation_service = self._context.invocation_service - invocation_service.invoke(invocation) - invocation.future.result() - + await invocation_service.ainvoke(invocation) return _proxy_init[service_name](service_name, name, self._context) - def destroy_proxy(self, service_name, name, destroy_on_remote=True): + async def destroy_proxy(self, service_name, name, destroy_on_remote=True): ns = (service_name, name) try: self._proxies.pop(ns) @@ -81,8 +78,7 @@ def destroy_proxy(self, service_name, name, destroy_on_remote=True): request = client_destroy_proxy_codec.encode_request(name, service_name) invocation = Invocation(request) invocation_service = self._context.invocation_service - invocation_service.invoke(invocation) - invocation.future.result() + await invocation_service.ainvoke(invocation) return True except KeyError: return False diff --git 
a/tests/integration/asyncio/__init__.py b/tests/integration/asyncio/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/asyncio/authentication_tests/__init__.py b/tests/integration/asyncio/authentication_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/asyncio/authentication_tests/authentication_test.py b/tests/integration/asyncio/authentication_tests/authentication_test.py new file mode 100644 index 0000000000..601ac5ef59 --- /dev/null +++ b/tests/integration/asyncio/authentication_tests/authentication_test.py @@ -0,0 +1,62 @@ +import os +import unittest + +import pytest + +from hazelcast.errors import HazelcastError +from tests.integration.asyncio.base import HazelcastTestCase +from tests.util import get_abs_path, compare_client_version +from hazelcast.asyncio.client import HazelcastClient + +try: + from hazelcast.security import BasicTokenProvider +except ImportError: + pass + + +@pytest.mark.enterprise +@unittest.skipIf( + compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" +) +class AuthenticationTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + current_directory = os.path.dirname(__file__) + rc = None + hazelcast_token_xml = get_abs_path(current_directory, "hazelcast-token.xml") + hazelcast_userpass_xml = get_abs_path(current_directory, "hazelcast-user-pass.xml") + + def setUp(self): + self.rc = self.create_rc() + + def tearDown(self): + self.rc.exit() + + async def test_no_auth(self): + cluster = self.create_cluster(self.rc, self.configure_cluster(self.hazelcast_userpass_xml)) + cluster.start_member() + + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start(cluster_name=cluster.id, cluster_connect_timeout=2) + + async def test_token_auth(self): + cluster = self.create_cluster(self.rc, self.configure_cluster(self.hazelcast_token_xml)) + cluster.start_member() + + token_provider = BasicTokenProvider("Hazelcast") + client = await HazelcastClient.create_and_start(cluster_name=cluster.id, token_provider=token_provider) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + async def test_username_password_auth(self): + cluster = self.create_cluster(self.rc, self.configure_cluster(self.hazelcast_userpass_xml)) + cluster.start_member() + + client = await HazelcastClient.create_and_start( + cluster_name=cluster.id, creds_username="member1", creds_password="s3crEt" + ) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + @classmethod + def configure_cluster(cls, filename): + with open(filename, "r") as f: + return f.read() diff --git a/tests/integration/asyncio/authentication_tests/hazelcast-token.xml b/tests/integration/asyncio/authentication_tests/hazelcast-token.xml new file mode 100644 index 0000000000..196dff8bd2 --- /dev/null +++ b/tests/integration/asyncio/authentication_tests/hazelcast-token.xml @@ -0,0 +1,26 @@ + + + + + + create + destroy + put + read + + + + + + + + + Hazelcast + + + + + \ No newline at end of file diff --git a/tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml b/tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml new file mode 100644 index 0000000000..3f58bf9b2a --- /dev/null +++ b/tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml @@ -0,0 +1,25 @@ + + + + + + create + destroy + put + read + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/tests/integration/asyncio/backup_acks_test.py b/tests/integration/asyncio/backup_acks_test.py new file mode 100644 index 0000000000..44d5662ae4 --- /dev/null +++ b/tests/integration/asyncio/backup_acks_test.py @@ -0,0 +1,94 @@ +import unittest + +from mock import MagicMock + +from hazelcast.asyncio.client import HazelcastClient +from hazelcast.errors import IndeterminateOperationStateError +from tests.base import HazelcastTestCase + + +class BackupAcksTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + @classmethod + def setUpClass(cls): + cls.rc = cls.create_rc() + cls.cluster = cls.rc.createCluster(None, None) + cls.rc.startMember(cls.cluster.id) + cls.rc.startMember(cls.cluster.id) + + @classmethod + def tearDownClass(cls): + cls.rc.terminateCluster(cls.cluster.id) + cls.rc.exit() + + def setUp(self): + self.client = None + + async def asyncTearDown(self): + if self.client: + await self.client.shutdown() + + async def test_smart_mode(self): + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + fail_on_indeterminate_operation_state=True, + ) + m = await self.client.get_map("test") + # TODO: Remove the next line once + # https://github.com/hazelcast/hazelcast/issues/9398 is fixed + await m.get(1) + # it's enough for this operation to succeed + await m.set(1, 2) + + async def test_lost_backups_on_smart_mode_with_fail_on_indeterminate_operation_state(self): + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + operation_backup_timeout=0.3, + fail_on_indeterminate_operation_state=True, + ) + client = self.client + # replace backup ack handler with a mock to emulate backup acks loss + client._invocation_service._backup_event_handler = MagicMock() + m = await client.get_map("test") + with self.assertRaises(IndeterminateOperationStateError): + await m.set(1, 2) + + async def test_lost_backups_on_smart_mode_without_fail_on_indeterminate_operation_state(self): + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + operation_backup_timeout=0.3, + fail_on_indeterminate_operation_state=False, + ) + client = self.client + # replace backup ack handler with a mock to emulate backup acks loss + client._invocation_service._backup_event_handler = MagicMock() + m = await client.get_map("test") + # it's enough for this operation to succeed + await m.set(1, 2) + + async def test_backup_acks_disabled(self): + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + backup_ack_to_client_enabled=False, + ) + m = await self.client.get_map("test") + # it's enough for this operation to succeed + await m.set(1, 2) + + async def test_unisocket_mode(self): + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + smart_routing=False, + ) + m = await self.client.get_map("test") + # it's enough for this operation to succeed + await m.set(1, 2) + + async def test_unisocket_mode_with_disabled_backup_acks(self): + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + smart_routing=False, + backup_ack_to_client_enabled=False, + ) + m = await self.client.get_map("test") + # it's enough for this operation to succeed + await m.set(1, 2) diff --git a/tests/integration/asyncio/base.py b/tests/integration/asyncio/base.py new file mode 100644 index 0000000000..b6573bfede --- /dev/null +++ b/tests/integration/asyncio/base.py @@ -0,0 +1,120 @@ +import asyncio +import logging +import unittest + +from 
hazelcast.asyncio.client import HazelcastClient
+
+from tests.base import _Cluster
+from tests.hzrc.client import HzRemoteController
+from tests.util import get_current_timestamp
+
+
+class HazelcastTestCase(unittest.TestCase):
+    clients = []
+
+    def __init__(self, methodName):
+        unittest.TestCase.__init__(self, methodName)
+        self.logger = logging.getLogger(methodName)
+
+    @staticmethod
+    def create_rc():
+        return HzRemoteController("127.0.0.1", 9701)
+
+    @classmethod
+    def create_cluster(cls, rc, config=None):
+        return _Cluster(rc, rc.createCluster(None, config))
+
+    @classmethod
+    def create_cluster_keep_cluster_name(cls, rc, config=None):
+        return _Cluster(rc, rc.createClusterKeepClusterName(None, config))
+
+    async def create_client(self, config=None):
+        client = await HazelcastClient.create_and_start(**(config or {}))
+        self.clients.append(client)
+        return client
+
+    async def shutdown_all_clients(self):
+        # shut the clients down concurrently; the TaskGroup waits for all of them
+        async with asyncio.TaskGroup() as tg:
+            for c in self.clients:
+                tg.create_task(c.shutdown())
+        self.clients = []
+
+    async def assertTrueEventually(self, assertion, timeout=30):
+        timeout_time = get_current_timestamp() + timeout
+        last_exception = None
+        while get_current_timestamp() < timeout_time:
+            try:
+                result = assertion()
+                if asyncio.iscoroutine(result):
+                    # support async assertion callables as well
+                    await result
+                return
+            except AssertionError as e:
+                last_exception = e
+                await asyncio.sleep(0.1)
+        if last_exception is None:
+            raise Exception("Could not enter the assertion loop!")
+        raise last_exception
+
+    async def assertSetEventually(self, event: asyncio.Event, timeout=5):
+        try:
+            await asyncio.wait_for(event.wait(), timeout=timeout)
+        except asyncio.TimeoutError:
+            self.fail("Event was not set within %d seconds" % timeout)
+
+    def assertEntryEvent(
+        self,
+        event,
+        event_type,
+        key=None,
+        value=None,
+        old_value=None,
+        merging_value=None,
+        number_of_affected_entries=1,
+    ):
+        self.assertEqual(event.key, key)
+        self.assertEqual(event.event_type, event_type)
+        self.assertEqual(event.value, value)
+        self.assertEqual(event.merging_value, merging_value)
+        self.assertEqual(event.old_value, old_value)
+        self.assertEqual(event.number_of_affected_entries, number_of_affected_entries)
+
+    def assertDistributedObjectEvent(self, event, name, service_name, event_type):
+        self.assertEqual(name, event.name)
+        self.assertEqual(service_name, event.service_name)
+        self.assertEqual(event_type, event.event_type)
+
+    def set_logging_level(self, level):
+        logging.getLogger().setLevel(level)
+
+
+class SingleMemberTestCase(unittest.IsolatedAsyncioTestCase, HazelcastTestCase):
+    """
+    Test cases where a single member - client combination is needed
+    """
+
+    rc = None
+    cluster = None
+    member = None
+    client = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.rc = cls.create_rc()
+        cls.cluster = cls.create_cluster(cls.rc, cls.configure_cluster())
+        cls.member = cls.cluster.start_member()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.rc.terminateCluster(cls.cluster.id)
+        cls.rc.exit()
+
+    @classmethod
+    def configure_client(cls, config):
+        return config
+
+    @classmethod
+    def configure_cluster(cls):
+        return None
+
+    async def asyncSetUp(self):
+        self.client = await HazelcastClient.create_and_start(**self.configure_client({}))
+
+    async def asyncTearDown(self):
+        await self.client.shutdown()
diff --git a/tests/integration/asyncio/client_test.py b/tests/integration/asyncio/client_test.py
new file mode 100644
index 0000000000..db15c28140
--- /dev/null
+++ b/tests/integration/asyncio/client_test.py
@@ -0,0 +1,142 @@
+import unittest
+
+from tests.integration.asyncio.base import HazelcastTestCase, SingleMemberTestCase
+from 
hazelcast.asyncio.client import HazelcastClient
+from tests.hzrc.ttypes import Lang
+from tests.util import compare_client_version, random_string
+
+try:
+    from hazelcast.config import Config
+    from hazelcast.errors import InvalidConfigurationError
+except ImportError:
+    # For backward compatibility tests
+    pass
+
+
+class ClientLabelsTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.rc = cls.create_rc()
+        cls.cluster = cls.create_cluster(cls.rc)
+        cls.cluster.start_member()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.rc.terminateCluster(cls.cluster.id)
+        cls.rc.exit()
+
+    async def asyncTearDown(self):
+        await self.shutdown_all_clients()
+
+    async def test_default_config(self):
+        client = await self.create_client({"cluster_name": self.cluster.id})
+        self.assertIsNone(self.get_labels_from_member(client._connection_manager.client_uuid))
+
+    async def test_provided_labels_are_received(self):
+        client = await self.create_client(
+            {
+                "cluster_name": self.cluster.id,
+                "labels": [
+                    "test-label",
+                ],
+            }
+        )
+        self.assertEqual(
+            b"test-label", self.get_labels_from_member(client._connection_manager.client_uuid)
+        )
+
+    def get_labels_from_member(self, client_uuid):
+        script = """
+        var clients = instance_0.getClientService().getConnectedClients().toArray();
+        for (i=0; i < clients.length; i++) {
+            var client = clients[i];
+            if ("%s".equals(client.getUuid().toString())) {
+                result = client.getLabels().iterator().next();
+                break;
+            }
+        }""" % str(
+            client_uuid
+        )
+        return self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT).result
+
+
+@unittest.skipIf(
+    compare_client_version("4.2.2") < 0 or compare_client_version("5.0") == 0,
+    "Tests the features added in 5.1 version of the client, "
+    "which are backported into 4.2.2 and 5.0.1",
+)
+class ClientTcpMetricsTest(SingleMemberTestCase):
+    @classmethod
+    def configure_client(cls, config):
+        config["cluster_name"] = cls.cluster.id
+        return config
+
+    async def test_bytes_received(self):
+        reactor = self.client._reactor
+        bytes_received = reactor._bytes_received
+        self.assertGreater(bytes_received, 0)
+        m = await self.client.get_map(random_string())
+        await m.get(random_string())
+        self.assertGreater(reactor._bytes_received, bytes_received)
+
+    async def test_bytes_sent(self):
+        reactor = self.client._reactor
+        bytes_sent = reactor._bytes_sent
+        self.assertGreater(bytes_sent, 0)
+        m = await self.client.get_map(random_string())
+        await m.set(random_string(), random_string())
+        self.assertGreater(reactor._bytes_sent, bytes_sent)
+
+
+@unittest.skipIf(
+    compare_client_version("5.2") < 0,
+    "Tests the features added in 5.2 version of the client",
+)
+class ClientConfigurationTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase):
+    rc = None
+    cluster = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.rc = cls.create_rc()
+        cls.cluster = cls.create_cluster(cls.rc, None)
+        cls.cluster.start_member()
+
+    def setUp(self):
+        self.client = None
+
+    async def asyncTearDown(self):
+        if self.client:
+            await self.client.shutdown()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.rc.terminateCluster(cls.cluster.id)
+        cls.rc.exit()
+
+    async def test_keyword_args_configuration(self):
+        self.client = await HazelcastClient.create_and_start(
+            cluster_name=self.cluster.id,
+        )
+        self.assertTrue(self.client.lifecycle_service.is_running())
+
+    async def test_configuration_object(self):
+        config = Config()
+        config.cluster_name = self.cluster.id
+        self.client = await 
HazelcastClient.create_and_start(config) + self.assertTrue(self.client.lifecycle_service.is_running()) + + async def test_configuration_object_as_keyword_argument(self): + config = Config() + config.cluster_name = self.cluster.id + self.client = await HazelcastClient.create_and_start(config=config) + self.assertTrue(self.client.lifecycle_service.is_running()) + + async def test_ambiguous_configuration(self): + config = Config() + with self.assertRaisesRegex( + InvalidConfigurationError, + "Ambiguous client configuration is found", + ): + self.client = await HazelcastClient.create_and_start(config, cluster_name="a-cluster") diff --git a/tests/integration/asyncio/proxy/__init__.py b/tests/integration/asyncio/proxy/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/asyncio/proxy/hazelcast.xml b/tests/integration/asyncio/proxy/hazelcast.xml new file mode 100644 index 0000000000..c036e17d50 --- /dev/null +++ b/tests/integration/asyncio/proxy/hazelcast.xml @@ -0,0 +1,47 @@ + + + false + + + 5701 + + + 0 + + + + + com.hazelcast.client.test.IdentifiedFactory + + com.hazelcast.client.test.IdentifiedDataSerializableFactory + + + + com.hazelcast.client.test.PortableFactory + + + + + + 6 + + + 10 + + + 10 + 180 + + diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py new file mode 100644 index 0000000000..e7cf09ca39 --- /dev/null +++ b/tests/integration/asyncio/proxy/map_test.py @@ -0,0 +1,1121 @@ +import asyncio +import os +import time +import unittest + + +try: + from hazelcast.aggregator import ( + count, + distinct, + double_avg, + double_sum, + fixed_point_sum, + floating_point_sum, + int_avg, + int_sum, + long_avg, + long_sum, + max_, + max_by, + min_, + min_by, + number_avg, + ) +except ImportError: + # If the import of those fail, we won't use + # them in the tests thanks to client version check. + pass + +try: + from hazelcast.projection import ( + single_attribute, + multi_attribute, + identity, + ) +except ImportError: + # If the import of those fail, we won't use + # them in the tests thanks to client version check. 
+ pass + +from hazelcast.core import HazelcastJsonValue +from hazelcast.config import IndexType, IntType +from hazelcast.errors import HazelcastError +from hazelcast.predicate import greater_or_equal, less_or_equal, sql, paging, true +from hazelcast.internal.asyncio_proxy.map import EntryEventType +from hazelcast.serialization.api import IdentifiedDataSerializable +from tests.integration.asyncio.base import SingleMemberTestCase +from tests.integration.backward_compatible.util import ( + read_string_from_input, + write_string_to_output, +) +from tests.util import ( + event_collector, + fill_map, + get_current_timestamp, + compare_client_version, + compare_server_version, + skip_if_client_version_older_than, + random_string, +) + + +class EntryProcessor(IdentifiedDataSerializable): + FACTORY_ID = 66 + CLASS_ID = 1 + + def __init__(self, value=None): + self.value = value + + def write_data(self, object_data_output): + write_string_to_output(object_data_output, self.value) + + def read_data(self, object_data_input): + self.value = read_string_from_input(object_data_input) + + def get_factory_id(self): + return self.FACTORY_ID + + def get_class_id(self): + return self.CLASS_ID + + +class MapGetInterceptor(IdentifiedDataSerializable): + + FACTORY_ID = 666 + CLASS_ID = 6 + + def __init__(self, prefix): + self.prefix = prefix + + def write_data(self, object_data_output): + write_string_to_output(object_data_output, self.prefix) + + def read_data(self, object_data_input): + pass + + def get_factory_id(self): + return self.FACTORY_ID + + def get_class_id(self): + return self.CLASS_ID + + +class MapTest(SingleMemberTestCase): + @classmethod + def configure_cluster(cls): + path = os.path.abspath(__file__) + dir_path = os.path.dirname(path) + with open(os.path.join(dir_path, "hazelcast.xml")) as f: + return f.read() + + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + config["data_serializable_factories"] = { + EntryProcessor.FACTORY_ID: {EntryProcessor.CLASS_ID: EntryProcessor} + } + return config + + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) + + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() + + async def test_add_entry_listener_item_added(self): + collector = event_collector() + await self.map.add_entry_listener(include_value=True, added_func=collector) + await self.map.put("key", "value") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent(event, key="key", event_type=EntryEventType.ADDED, value="value") + + await self.assertTrueEventually(assert_event, 5) + + async def test_add_entry_listener_item_removed(self): + collector = event_collector() + await self.map.add_entry_listener(include_value=True, removed_func=collector) + await self.map.put("key", "value") + await self.map.remove("key") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent( + event, key="key", event_type=EntryEventType.REMOVED, old_value="value" + ) + + await self.assertTrueEventually(assert_event, 5) + + async def test_add_entry_listener_item_updated(self): + collector = event_collector() + await self.map.add_entry_listener(include_value=True, updated_func=collector) + await self.map.put("key", "value") + await self.map.put("key", "new_value") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = 
collector.events[0] + self.assertEntryEvent( + event, + key="key", + event_type=EntryEventType.UPDATED, + old_value="value", + value="new_value", + ) + + await self.assertTrueEventually(assert_event, 5) + + async def test_add_entry_listener_item_expired(self): + collector = event_collector() + await self.map.add_entry_listener(include_value=True, expired_func=collector) + await self.map.put("key", "value", ttl=0.1) + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent( + event, key="key", event_type=EntryEventType.EXPIRED, old_value="value" + ) + + await self.assertTrueEventually(assert_event, 10) + + async def test_add_entry_listener_with_key(self): + collector = event_collector() + await self.map.add_entry_listener(key="key1", include_value=True, added_func=collector) + await self.map.put("key2", "value2") + await self.map.put("key1", "value1") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent( + event, key="key1", event_type=EntryEventType.ADDED, value="value1" + ) + + await self.assertTrueEventually(assert_event, 5) + + async def test_add_entry_listener_with_predicate(self): + collector = event_collector() + await self.map.add_entry_listener( + predicate=sql("this == value1"), include_value=True, added_func=collector + ) + await self.map.put("key2", "value2") + await self.map.put("key1", "value1") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent( + event, key="key1", event_type=EntryEventType.ADDED, value="value1" + ) + + await self.assertTrueEventually(assert_event, 5) + + async def test_add_entry_listener_with_key_and_predicate(self): + collector = event_collector() + await self.map.add_entry_listener( + key="key1", predicate=sql("this == value3"), include_value=True, added_func=collector + ) + await self.map.put("key2", "value2") + await self.map.put("key1", "value1") + await self.map.remove("key1") + await self.map.put("key1", "value3") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent( + event, key="key1", event_type=EntryEventType.ADDED, value="value3" + ) + + await self.assertTrueEventually(assert_event, 5) + + async def test_add_index(self): + await self.map.add_index(attributes=["this"]) + await self.map.add_index(attributes=["this"], index_type=IndexType.HASH) + await self.map.add_index( + attributes=["this"], + index_type=IndexType.BITMAP, + bitmap_index_options={ + "unique_key": "this", + }, + ) + + async def test_add_index_duplicate_fields(self): + with self.assertRaises(ValueError): + await self.map.add_index(attributes=["this", "this"]) + + async def test_add_index_invalid_attribute(self): + with self.assertRaises(ValueError): + await self.map.add_index(attributes=["this.x."]) + + async def test_clear(self): + await self.fill_map() + await self.map.clear() + self.assertEqual(await self.map.size(), 0) + + async def test_contains_key(self): + await self.fill_map() + self.assertTrue(await self.map.contains_key("key-1")) + self.assertFalse(await self.map.contains_key("key-10")) + + async def test_contains_value(self): + await self.fill_map() + self.assertTrue(await self.map.contains_value("value-1")) + self.assertFalse(await self.map.contains_value("value-10")) + + async def test_delete(self): + await self.fill_map() + await self.map.delete("key-1") + self.assertEqual(await self.map.size(), 
9) + self.assertFalse(await self.map.contains_key("key-1")) + + async def test_entry_set(self): + entries = await self.fill_map() + self.assertCountEqual(await self.map.entry_set(), list(entries.items())) + + async def test_entry_set_with_predicate(self): + await self.fill_map() + self.assertEqual(await self.map.entry_set(sql("this == 'value-1'")), [("key-1", "value-1")]) + + async def test_evict(self): + await self.fill_map() + await self.map.evict("key-1") + self.assertEqual(await self.map.size(), 9) + self.assertFalse(await self.map.contains_key("key-1")) + + async def test_evict_all(self): + await self.fill_map() + await self.map.evict_all() + self.assertEqual(await self.map.size(), 0) + + async def test_execute_on_entries(self): + m = await self.fill_map() + expected_entry_set = [(key, "processed") for key in m] + values = await self.map.execute_on_entries(EntryProcessor("processed")) + + self.assertCountEqual(expected_entry_set, await self.map.entry_set()) + self.assertCountEqual(expected_entry_set, values) + + async def test_execute_on_entries_with_predicate(self): + m = await self.fill_map() + expected_entry_set = [(key, "processed") if key < "key-5" else (key, m[key]) for key in m] + expected_values = [(key, "processed") for key in m if key < "key-5"] + values = await self.map.execute_on_entries(EntryProcessor("processed"), sql("__key < 'key-5'")) + self.assertCountEqual(expected_entry_set, await self.map.entry_set()) + self.assertCountEqual(expected_values, values) + + async def test_execute_on_key(self): + await self.map.put("test-key", "test-value") + value = await self.map.execute_on_key("test-key", EntryProcessor("processed")) + self.assertEqual("processed", await self.map.get("test-key")) + self.assertEqual("processed", value) + + async def test_execute_on_keys(self): + m = await self.fill_map() + expected_entry_set = [(key, "processed") for key in m] + values = await self.map.execute_on_keys(list(m.keys()), EntryProcessor("processed")) + self.assertCountEqual(expected_entry_set, await self.map.entry_set()) + self.assertCountEqual(expected_entry_set, values) + + async def test_execute_on_keys_with_empty_key_list(self): + m = await self.fill_map() + expected_entry_set = [(key, m[key]) for key in m] + values = await self.map.execute_on_keys([], EntryProcessor("processed")) + self.assertEqual([], values) + self.assertCountEqual(expected_entry_set, await self.map.entry_set()) + + async def test_flush(self): + await self.fill_map() + await self.map.flush() + + @unittest.skip + async def test_force_unlock(self): + async def force_unlock(): + await self.map.force_unlock("key") + + await self.map.put("key", "value") + await self.map.lock("key") + task = asyncio.create_task(force_unlock()) + + async def assertion(): + self.assertFalse(await self.map.is_locked("key")) + + await task + await self.assertTrueEventually(assertion) + + async def test_get_all(self): + expected = await self.fill_map(1000) + actual = await self.map.get_all(list(expected.keys())) + self.assertCountEqual(expected, actual) + + async def test_get_all_when_no_keys(self): + self.assertEqual(await self.map.get_all([]), {}) + + async def test_get_entry_view(self): + await self.map.put("key", "value") + await self.map.get("key") + await self.map.put("key", "new_value") + + entry_view = await self.map.get_entry_view("key") + + self.assertEqual(entry_view.key, "key") + self.assertEqual(entry_view.value, "new_value") + self.assertIsNotNone(entry_view.cost) + self.assertIsNotNone(entry_view.creation_time) + 
self.assertIsNotNone(entry_view.expiration_time) + if compare_server_version(self.client, "4.2") < 0: + self.assertEqual(entry_view.hits, 2) + else: + # 4.2+ servers do not collect per entry stats by default + self.assertIsNotNone(entry_view.hits) + self.assertIsNotNone(entry_view.last_access_time) + self.assertIsNotNone(entry_view.last_stored_time) + self.assertIsNotNone(entry_view.last_update_time) + self.assertEqual(entry_view.version, 1) + self.assertIsNotNone(entry_view.ttl) + self.assertIsNotNone(entry_view.max_idle) + + async def test_is_empty(self): + await self.map.put("key", "value") + self.assertFalse(await self.map.is_empty()) + await self.map.clear() + self.assertTrue(await self.map.is_empty()) + + async def test_is_locked(self): + await self.map.put("key", "value") + self.assertFalse(await self.map.is_locked("key")) + await self.map.lock("key") + self.assertTrue(await self.map.is_locked("key")) + await self.map.unlock("key") + self.assertFalse(await self.map.is_locked("key")) + + async def test_key_set(self): + keys = list((await self.fill_map()).keys()) + self.assertCountEqual(await self.map.key_set(), keys) + + async def test_key_set_with_predicate(self): + await self.fill_map() + self.assertEqual(await self.map.key_set(sql("this == 'value-1'")), ["key-1"]) + + @unittest.skip + async def test_lock(self): + await self.map.put("key", "value") + + t = self.start_new_thread(lambda: self.map.lock("key")) + t.join() + + self.assertFalse(await self.map.try_put("key", "new_value", timeout=0.01)) + + async def test_put_all(self): + m = {"key-%d" % x: "value-%d" % x for x in range(0, 1000)} + await self.map.put_all(m) + + entries = await self.map.entry_set() + + self.assertCountEqual(entries, m.items()) + + async def test_put_all_when_no_keys(self): + self.assertIsNone(await self.map.put_all({})) + + async def test_put_if_absent_when_missing_value(self): + returned_value = await self.map.put_if_absent("key", "new_value") + + self.assertIsNone(returned_value) + self.assertEqual(await self.map.get("key"), "new_value") + + async def test_put_if_absent_when_existing_value(self): + await self.map.put("key", "value") + returned_value = await self.map.put_if_absent("key", "new_value") + self.assertEqual(returned_value, "value") + self.assertEqual(await self.map.get("key"), "value") + + async def test_put_get(self): + self.assertIsNone(await self.map.put("key", "value")) + self.assertEqual(await self.map.get("key"), "value") + + async def test_put_get_large_payload(self): + # The fix for reading large payloads is introduced in 4.2.1 + # See https://github.com/hazelcast/hazelcast-python-client/pull/436 + skip_if_client_version_older_than(self, "4.2.1") + + payload = bytearray(os.urandom(16 * 1024 * 1024)) + start = get_current_timestamp() + self.assertIsNone(await self.map.put("key", payload)) + self.assertEqual(await self.map.get("key"), payload) + self.assertLessEqual(get_current_timestamp() - start, 5) + + async def test_put_get2(self): + val = "x" * 5000 + self.assertIsNone(await self.map.put("key-x", val)) + self.assertEqual(await self.map.get("key-x"), val) + + async def test_put_when_existing(self): + await self.map.put("key", "value") + self.assertEqual(await self.map.put("key", "new_value"), "value") + self.assertEqual(await self.map.get("key"), "new_value") + + async def test_put_transient(self): + await self.map.put_transient("key", "value") + self.assertEqual(await self.map.get("key"), "value") + + async def test_remove(self): + await self.map.put("key", "value") + removed = await 
self.map.remove("key") + self.assertEqual(removed, "value") + self.assertEqual(0, await self.map.size()) + self.assertFalse(await self.map.contains_key("key")) + + async def test_remove_all_with_none_predicate(self): + skip_if_client_version_older_than(self, "5.2.0") + + with self.assertRaises(AssertionError): + await self.map.remove_all(None) + + async def test_remove_all(self): + skip_if_client_version_older_than(self, "5.2.0") + + await self.fill_map() + await self.map.remove_all(predicate=sql("__key > 'key-7'")) + self.assertEqual(await self.map.size(), 8) + + async def test_remove_if_same_when_same(self): + await self.map.put("key", "value") + self.assertTrue(await self.map.remove_if_same("key", "value")) + self.assertFalse(await self.map.contains_key("key")) + + async def test_remove_if_same_when_different(self): + await self.map.put("key", "value") + self.assertFalse(await self.map.remove_if_same("key", "another_value")) + self.assertTrue(await self.map.contains_key("key")) + + async def test_remove_entry_listener(self): + collector = event_collector() + reg_id = await self.map.add_entry_listener(added_func=collector) + + await self.map.put("key", "value") + await self.assertTrueEventually(lambda: self.assertEqual(len(collector.events), 1)) + await self.map.remove_entry_listener(reg_id) + await self.map.put("key2", "value") + + await asyncio.sleep(1) + self.assertEqual(len(collector.events), 1) + + async def test_remove_entry_listener_with_none_id(self): + with self.assertRaises(AssertionError) as cm: + await self.map.remove_entry_listener(None) + e = cm.exception + self.assertEqual(e.args[0], "None user_registration_id is not allowed!") + + async def test_replace(self): + await self.map.put("key", "value") + replaced = await self.map.replace("key", "new_value") + self.assertEqual(replaced, "value") + self.assertEqual(await self.map.get("key"), "new_value") + + async def test_replace_if_same_when_same(self): + await self.map.put("key", "value") + self.assertTrue(await self.map.replace_if_same("key", "value", "new_value")) + self.assertEqual(await self.map.get("key"), "new_value") + + async def test_replace_if_same_when_different(self): + await self.map.put("key", "value") + self.assertFalse(await self.map.replace_if_same("key", "another_value", "new_value")) + self.assertEqual(await self.map.get("key"), "value") + + async def test_set(self): + await self.map.set("key", "value") + + self.assertEqual(await self.map.get("key"), "value") + + async def test_set_ttl(self): + await self.map.put("key", "value") + await self.map.set_ttl("key", 0.1) + + async def evicted(): + self.assertFalse(await self.map.contains_key("key")) + + await self.assertTrueEventually(evicted, 5) + + async def test_size(self): + await self.fill_map() + + self.assertEqual(10, await self.map.size()) + + async def test_try_lock_when_unlocked(self): + self.assertTrue(await self.map.try_lock("key")) + self.assertTrue(await self.map.is_locked("key")) + + @unittest.skip + async def test_try_lock_when_locked(self): + t = self.start_new_thread(lambda: self.map.lock("key")) + t.join() + self.assertFalse(await self.map.try_lock("key", timeout=0.1)) + + async def test_try_put_when_unlocked(self): + self.assertTrue(await self.map.try_put("key", "value")) + self.assertEqual(await self.map.get("key"), "value") + + @unittest.skip + async def test_try_put_when_locked(self): + t = self.start_new_thread(lambda: self.map.lock("key")) + t.join() + self.assertFalse(await self.map.try_put("key", "value", timeout=0.1)) + + @unittest.skip 
+ async def test_try_remove_when_unlocked(self): + await self.map.put("key", "value") + self.assertTrue(await self.map.try_remove("key")) + self.assertIsNone(await self.map.get("key")) + + @unittest.skip + async def test_try_remove_when_locked(self): + await self.map.put("key", "value") + t = self.start_new_thread(lambda: self.map.lock("key")) + t.join() + self.assertFalse(await self.map.try_remove("key", timeout=0.1)) + + @unittest.skip + async def test_unlock(self): + await self.map.lock("key") + self.assertTrue(await self.map.is_locked("key")) + await self.map.unlock("key") + self.assertFalse(await self.map.is_locked("key")) + + @unittest.skip + async def test_unlock_when_no_lock(self): + with self.assertRaises(HazelcastError): + await self.map.unlock("key") + + async def test_values(self): + values = list((await self.fill_map()).values()) + + self.assertCountEqual(list(await self.map.values()), values) + + async def test_values_with_predicate(self): + await self.fill_map() + self.assertEqual(await self.map.values(sql("this == 'value-1'")), ["value-1"]) + + def test_str(self): + self.assertTrue(str(self.map).startswith("Map")) + + async def test_add_interceptor(self): + interceptor = MapGetInterceptor(":") + registration_id = await self.map.add_interceptor(interceptor) + self.assertIsNotNone(registration_id) + + await self.map.set(1, ")") + value = await self.map.get(1) + self.assertEqual(":)", value) + + async def test_remove_interceptor(self): + skip_if_client_version_older_than(self, "5.0") + + interceptor = MapGetInterceptor(":") + registration_id = await self.map.add_interceptor(interceptor) + self.assertIsNotNone(registration_id) + self.assertTrue(await self.map.remove_interceptor(registration_id)) + + # Unknown registration id should return False + self.assertFalse(await self.map.remove_interceptor(registration_id)) + + # Make sure that the interceptor is indeed removed + await self.map.set(1, ")") + value = await self.map.get(1) + self.assertEqual(")", value) + + async def fill_map(self, count=10): + m = {"key-%d" % x: "value-%d" % x for x in range(0, count)} + await self.map.put_all(m) + return m + + +class MapStoreTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + @classmethod + def configure_cluster(cls): + path = os.path.abspath(__file__) + dir_path = os.path.dirname(path) + with open(os.path.join(dir_path, "hazelcast_mapstore.xml")) as f: + return f.read() + + def setUp(self): + self.map = self.client.get_map("mapstore-test").blocking() + self.entries = fill_map(self.map, size=10, key_prefix="key", value_prefix="val") + + def tearDown(self): + self.map.destroy() + + def test_load_all_with_no_args_loads_all_keys(self): + self.map.evict_all() + self.map.load_all() + entry_set = self.map.get_all(self.entries.keys()) + self.assertCountEqual(entry_set, self.entries) + + def test_load_all_with_key_set_loads_given_keys(self): + self.map.evict_all() + self.map.load_all(["key0", "key1"]) + entry_set = self.map.get_all(["key0", "key1"]) + self.assertCountEqual(entry_set, {"key0": "val0", "key1": "val1"}) + + def test_load_all_overrides_entries_in_memory_by_default(self): + self.map.evict_all() + self.map.put_transient("key0", "new0") + self.map.put_transient("key1", "new1") + self.map.load_all(["key0", "key1"]) + entry_set = self.map.get_all(["key0", "key1"]) + self.assertCountEqual(entry_set, {"key0": "val0", "key1": "val1"}) + + def 
test_load_all_with_replace_existing_false_does_not_override(self): + self.map.evict_all() + self.map.put_transient("key0", "new0") + self.map.put_transient("key1", "new1") + self.map.load_all(["key0", "key1"], replace_existing_values=False) + entry_set = self.map.get_all(["key0", "key1"]) + self.assertCountEqual(entry_set, {"key0": "new0", "key1": "new1"}) + + def test_evict(self): + self.map.evict("key0") + self.assertEqual(self.map.size(), 9) + + def test_evict_non_existing_key(self): + self.map.evict("non_existing_key") + self.assertEqual(self.map.size(), 10) + + def test_evict_all(self): + self.map.evict_all() + self.assertEqual(self.map.size(), 0) + + def test_add_entry_listener_item_loaded(self): + collector = event_collector() + self.map.add_entry_listener(include_value=True, loaded_func=collector) + self.map.put("key", "value", ttl=0.1) + time.sleep(2) + self.map.get("key") + + def assert_event(): + self.assertEqual(len(collector.events), 1) + event = collector.events[0] + self.assertEntryEvent(event, key="key", value="value", event_type=EntryEventType.LOADED) + + self.assertTrueEventually(assert_event, 10) + + +class MapTTLTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + def setUp(self): + self.map = self.client.get_map(random_string()).blocking() + + def tearDown(self): + self.map.destroy() + + def test_put_default_ttl(self): + self.map.put("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_put(self): + self.map.put("key", "value", 0.1) + self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + + def test_put_transient_default_ttl(self): + self.map.put_transient("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_put_transient(self): + self.map.put_transient("key", "value", 0.1) + self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + + def test_put_if_absent_ttl(self): + self.map.put_if_absent("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_put_if_absent(self): + self.map.put_if_absent("key", "value", 0.1) + self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + + def test_set_default_ttl(self): + self.map.set("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_set(self): + self.map.set("key", "value", 0.1) + self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + + +class MapMaxIdleTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + def setUp(self): + self.map = self.client.get_map(random_string()).blocking() + + def tearDown(self): + self.map.destroy() + + def test_put_default_max_idle(self): + self.map.put("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_put(self): + self.map.put("key", "value", max_idle=0.1) + time.sleep(1.0) + self.assertFalse(self.map.contains_key("key")) + + def test_put_transient_default_max_idle(self): + self.map.put_transient("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_put_transient(self): + self.map.put_transient("key", "value", max_idle=0.1) + time.sleep(1.0) + self.assertFalse(self.map.contains_key("key")) + + def test_put_if_absent_max_idle(self): + self.map.put_if_absent("key", "value") + 
time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_put_if_absent(self): + self.map.put_if_absent("key", "value", max_idle=0.1) + time.sleep(1.0) + self.assertFalse(self.map.contains_key("key")) + + def test_set_default_ttl(self): + self.map.set("key", "value") + time.sleep(1.0) + self.assertTrue(self.map.contains_key("key")) + + def test_set(self): + self.map.set("key", "value", max_idle=0.1) + time.sleep(1.0) + self.assertFalse(self.map.contains_key("key")) + + +@unittest.skipIf( + compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" +) +class MapAggregatorsIntTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + config["default_int_type"] = IntType.INT + return config + + def setUp(self): + self.map = self.client.get_map(random_string()).blocking() + self.map.put_all({"key-%d" % i: i for i in range(50)}) + + def tearDown(self): + self.map.destroy() + + def test_aggregate_with_none_aggregator(self): + with self.assertRaises(AssertionError): + self.map.aggregate(None) + + def test_aggregate_with_paging_predicate(self): + with self.assertRaises(AssertionError): + self.map.aggregate(int_avg("foo"), paging(true(), 10)) + + def test_int_average(self): + average = self.map.aggregate(int_avg()) + self.assertEqual(24.5, average) + + def test_int_average_with_attribute_path(self): + average = self.map.aggregate(int_avg("this")) + self.assertEqual(24.5, average) + + def test_int_average_with_predicate(self): + average = self.map.aggregate(int_avg(), greater_or_equal("this", 47)) + self.assertEqual(48, average) + + def test_int_sum(self): + sum_ = self.map.aggregate(int_sum()) + self.assertEqual(1225, sum_) + + def test_int_sum_with_attribute_path(self): + sum_ = self.map.aggregate(int_sum("this")) + self.assertEqual(1225, sum_) + + def test_int_sum_with_predicate(self): + sum_ = self.map.aggregate(int_sum(), greater_or_equal("this", 47)) + self.assertEqual(144, sum_) + + def test_fixed_point_sum(self): + sum_ = self.map.aggregate(fixed_point_sum()) + self.assertEqual(1225, sum_) + + def test_fixed_point_sum_with_attribute_path(self): + sum_ = self.map.aggregate(fixed_point_sum("this")) + self.assertEqual(1225, sum_) + + def test_fixed_point_sum_with_predicate(self): + sum_ = self.map.aggregate(fixed_point_sum(), greater_or_equal("this", 47)) + self.assertEqual(144, sum_) + + def test_distinct(self): + self._fill_with_duplicate_values() + distinct_values = self.map.aggregate(distinct()) + self.assertEqual(set(range(50)), distinct_values) + + def test_distinct_with_attribute_path(self): + self._fill_with_duplicate_values() + distinct_values = self.map.aggregate(distinct("this")) + self.assertEqual(set(range(50)), distinct_values) + + def test_distinct_with_predicate(self): + self._fill_with_duplicate_values() + distinct_values = self.map.aggregate(distinct(), greater_or_equal("this", 10)) + self.assertEqual(set(range(10, 50)), distinct_values) + + def test_max_by(self): + max_item = self.map.aggregate(max_by("this")) + self.assertEqual("key-49", max_item.key) + self.assertEqual(49, max_item.value) + + def test_max_by_with_predicate(self): + max_item = self.map.aggregate(max_by("this"), less_or_equal("this", 10)) + self.assertEqual("key-10", max_item.key) + self.assertEqual(10, max_item.value) + + def test_min_by(self): + min_item = self.map.aggregate(min_by("this")) + self.assertEqual("key-0", min_item.key) + self.assertEqual(0, min_item.value) + + def 
test_min_by_with_predicate(self): + min_item = self.map.aggregate(min_by("this"), greater_or_equal("this", 10)) + self.assertEqual("key-10", min_item.key) + self.assertEqual(10, min_item.value) + + def _fill_with_duplicate_values(self): + # Map is initially filled with key-i: i mappings from [0, 50). + # Add more values with different keys but the same values to + # test the behaviour of the distinct aggregator. + self.map.put_all({"different-key-%d" % i: i for i in range(50)}) + + +@unittest.skipIf( + compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" +) +class MapAggregatorsLongTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + config["default_int_type"] = IntType.LONG + return config + + def setUp(self): + self.map = self.client.get_map(random_string()).blocking() + self.map.put_all({"key-%d" % i: i for i in range(50)}) + + def tearDown(self): + self.map.destroy() + + def test_long_average(self): + average = self.map.aggregate(long_avg()) + self.assertEqual(24.5, average) + + def test_long_average_with_attribute_path(self): + average = self.map.aggregate(long_avg("this")) + self.assertEqual(24.5, average) + + def test_long_average_with_predicate(self): + average = self.map.aggregate(long_avg(), greater_or_equal("this", 47)) + self.assertEqual(48, average) + + def test_long_sum(self): + sum_ = self.map.aggregate(long_sum()) + self.assertEqual(1225, sum_) + + def test_long_sum_with_attribute_path(self): + sum_ = self.map.aggregate(long_sum("this")) + self.assertEqual(1225, sum_) + + def test_long_sum_with_predicate(self): + sum_ = self.map.aggregate(long_sum(), greater_or_equal("this", 47)) + self.assertEqual(144, sum_) + + +@unittest.skipIf( + compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" +) +class MapAggregatorsDoubleTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + def setUp(self): + self.map = self.client.get_map(random_string()).blocking() + self.map.put_all({"key-%d" % i: float(i) for i in range(50)}) + + def tearDown(self): + self.map.destroy() + + def test_count(self): + count_ = self.map.aggregate(count()) + self.assertEqual(50, count_) + + def test_count_with_attribute_path(self): + count_ = self.map.aggregate(count("this")) + self.assertEqual(50, count_) + + def test_count_with_predicate(self): + count_ = self.map.aggregate(count(), greater_or_equal("this", 1)) + self.assertEqual(49, count_) + + def test_double_average(self): + average = self.map.aggregate(double_avg()) + self.assertEqual(24.5, average) + + def test_double_average_with_attribute_path(self): + average = self.map.aggregate(double_avg("this")) + self.assertEqual(24.5, average) + + def test_double_average_with_predicate(self): + average = self.map.aggregate(double_avg(), greater_or_equal("this", 47)) + self.assertEqual(48, average) + + def test_double_sum(self): + sum_ = self.map.aggregate(double_sum()) + self.assertEqual(1225, sum_) + + def test_double_sum_with_attribute_path(self): + sum_ = self.map.aggregate(double_sum("this")) + self.assertEqual(1225, sum_) + + def test_double_sum_with_predicate(self): + sum_ = self.map.aggregate(double_sum(), greater_or_equal("this", 47)) + self.assertEqual(144, sum_) + + def test_floating_point_sum(self): + sum_ = self.map.aggregate(floating_point_sum()) + self.assertEqual(1225, sum_) + + def 
test_floating_point_sum_with_attribute_path(self): + sum_ = self.map.aggregate(floating_point_sum("this")) + self.assertEqual(1225, sum_) + + def test_floating_point_sum_with_predicate(self): + sum_ = self.map.aggregate(floating_point_sum(), greater_or_equal("this", 47)) + self.assertEqual(144, sum_) + + def test_number_avg(self): + average = self.map.aggregate(number_avg()) + self.assertEqual(24.5, average) + + def test_number_avg_with_attribute_path(self): + average = self.map.aggregate(number_avg("this")) + self.assertEqual(24.5, average) + + def test_number_avg_with_predicate(self): + average = self.map.aggregate(number_avg(), greater_or_equal("this", 47)) + self.assertEqual(48, average) + + def test_max(self): + average = self.map.aggregate(max_()) + self.assertEqual(49, average) + + def test_max_with_attribute_path(self): + average = self.map.aggregate(max_("this")) + self.assertEqual(49, average) + + def test_max_with_predicate(self): + average = self.map.aggregate(max_(), less_or_equal("this", 3)) + self.assertEqual(3, average) + + def test_min(self): + average = self.map.aggregate(min_()) + self.assertEqual(0, average) + + def test_min_with_attribute_path(self): + average = self.map.aggregate(min_("this")) + self.assertEqual(0, average) + + def test_min_with_predicate(self): + average = self.map.aggregate(min_(), greater_or_equal("this", 3)) + self.assertEqual(3, average) + + +@unittest.skipIf( + compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" +) +class MapProjectionsTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + def setUp(self): + self.map = self.client.get_map(random_string()).blocking() + self.map.put(1, HazelcastJsonValue('{"attr1": 1, "attr2": 2, "attr3": 3}')) + self.map.put(2, HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}')) + + def tearDown(self): + self.map.destroy() + + def test_project_with_none_projection(self): + with self.assertRaises(AssertionError): + self.map.project(None) + + def test_project_with_paging_predicate(self): + with self.assertRaises(AssertionError): + self.map.project(single_attribute("foo"), paging(true(), 10)) + + def test_single_attribute(self): + attributes = self.map.project(single_attribute("attr1")) + self.assertCountEqual([1, 4], attributes) + + def test_single_attribute_with_predicate(self): + attributes = self.map.project(single_attribute("attr1"), greater_or_equal("attr1", 4)) + self.assertCountEqual([4], attributes) + + def test_multi_attribute(self): + attributes = self.map.project(multi_attribute("attr1", "attr2")) + self.assertCountEqual([[1, 2], [4, 5]], attributes) + + def test_multi_attribute_with_predicate(self): + attributes = self.map.project( + multi_attribute("attr1", "attr2"), + greater_or_equal("attr2", 3), + ) + self.assertCountEqual([[4, 5]], attributes) + + def test_identity(self): + attributes = self.map.project(identity()) + self.assertCountEqual( + [ + HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}'), + HazelcastJsonValue('{"attr1": 1, "attr2": 2, "attr3": 3}'), + ], + [attribute.value for attribute in attributes], + ) + + def test_identity_with_predicate(self): + attributes = self.map.project(identity(), greater_or_equal("attr2", 3)) + self.assertCountEqual( + [HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}')], + [attribute.value for attribute in attributes], + ) \ No newline at end of file From 162fd17e590d893b102d3c508114617526a2e0b7 Mon Sep 17 
00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 19:43:25 +0300 Subject: [PATCH 02/51] Updates --- hazelcast/internal/asyncio_proxy/manager.py | 36 -- hazelcast/proxy/__init__.py | 18 +- .../authentication_test.py | 4 +- .../authentication_tests/hazelcast-token.xml | 26 - .../hazelcast-user-pass.xml | 25 - tests/integration/asyncio/base.py | 7 +- tests/integration/asyncio/proxy/hazelcast.xml | 47 -- tests/integration/asyncio/proxy/map_test.py | 585 ++++++++---------- tests/util.py | 7 + 9 files changed, 294 insertions(+), 461 deletions(-) delete mode 100644 tests/integration/asyncio/authentication_tests/hazelcast-token.xml delete mode 100644 tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml delete mode 100644 tests/integration/asyncio/proxy/hazelcast.xml diff --git a/hazelcast/internal/asyncio_proxy/manager.py b/hazelcast/internal/asyncio_proxy/manager.py index 23356ed6d0..6bf635bcfc 100644 --- a/hazelcast/internal/asyncio_proxy/manager.py +++ b/hazelcast/internal/asyncio_proxy/manager.py @@ -3,49 +3,13 @@ from hazelcast.protocol.codec import client_create_proxy_codec, client_destroy_proxy_codec from hazelcast.internal.asyncio_invocation import Invocation from hazelcast.internal.asyncio_proxy.base import Proxy -# from hazelcast.proxy.executor import Executor -# from hazelcast.proxy.list import List from hazelcast.internal.asyncio_proxy.map import create_map_proxy -# from hazelcast.proxy.multi_map import MultiMap -# from hazelcast.proxy.queue import Queue -# from hazelcast.proxy.reliable_topic import ReliableTopic -# from hazelcast.proxy.replicated_map import ReplicatedMap -# from hazelcast.proxy.ringbuffer import Ringbuffer -# from hazelcast.proxy.set import Set -# from hazelcast.proxy.topic import Topic -# from hazelcast.proxy.pn_counter import PNCounter -# from hazelcast.proxy.flake_id_generator import FlakeIdGenerator -# from hazelcast.proxy.vector_collection import VectorCollection from hazelcast.util import to_list -# EXECUTOR_SERVICE = "hz:impl:executorService" -# LIST_SERVICE = "hz:impl:listService" -# MULTI_MAP_SERVICE = "hz:impl:multiMapService" MAP_SERVICE = "hz:impl:mapService" -# RELIABLE_TOPIC_SERVICE = "hz:impl:reliableTopicService" -# REPLICATED_MAP_SERVICE = "hz:impl:replicatedMapService" -# RINGBUFFER_SERVICE = "hz:impl:ringbufferService" -# SET_SERVICE = "hz:impl:setService" -# QUEUE_SERVICE = "hz:impl:queueService" -# TOPIC_SERVICE = "hz:impl:topicService" -# PN_COUNTER_SERVICE = "hz:impl:PNCounterService" -# FLAKE_ID_GENERATOR_SERVICE = "hz:impl:flakeIdGeneratorService" -# VECTOR_SERVICE = "hz:service:vector" _proxy_init: typing.Dict[str, typing.Callable[[str, str, typing.Any], Proxy]] = { - # EXECUTOR_SERVICE: Executor, - # LIST_SERVICE: List, MAP_SERVICE: create_map_proxy, - # MULTI_MAP_SERVICE: MultiMap, - # QUEUE_SERVICE: Queue, - # RELIABLE_TOPIC_SERVICE: ReliableTopic, - # REPLICATED_MAP_SERVICE: ReplicatedMap, - # RINGBUFFER_SERVICE: Ringbuffer, - # SET_SERVICE: Set, - # TOPIC_SERVICE: Topic, - # PN_COUNTER_SERVICE: PNCounter, - # FLAKE_ID_GENERATOR_SERVICE: FlakeIdGenerator, - # VECTOR_SERVICE: VectorCollection, } diff --git a/hazelcast/proxy/__init__.py b/hazelcast/proxy/__init__.py index 0b94d82fc2..14979c3ddc 100644 --- a/hazelcast/proxy/__init__.py +++ b/hazelcast/proxy/__init__.py @@ -54,23 +54,26 @@ def __init__(self, context): self._context = context self._proxies = {} - async def get_or_create(self, service_name, name, create_on_remote=True): + def get_or_create(self, service_name, name, create_on_remote=True): ns = 
(service_name, name) if ns in self._proxies: return self._proxies[ns] - proxy = await self._create_proxy(service_name, name, create_on_remote) + + proxy = self._create_proxy(service_name, name, create_on_remote) self._proxies[ns] = proxy return proxy - async def _create_proxy(self, service_name, name, create_on_remote): + def _create_proxy(self, service_name, name, create_on_remote): if create_on_remote: request = client_create_proxy_codec.encode_request(name, service_name) invocation = Invocation(request) invocation_service = self._context.invocation_service - await invocation_service.ainvoke(invocation) + invocation_service.invoke(invocation) + invocation.future.result() + return _proxy_init[service_name](service_name, name, self._context) - async def destroy_proxy(self, service_name, name, destroy_on_remote=True): + def destroy_proxy(self, service_name, name, destroy_on_remote=True): ns = (service_name, name) try: self._proxies.pop(ns) @@ -78,10 +81,11 @@ async def destroy_proxy(self, service_name, name, destroy_on_remote=True): request = client_destroy_proxy_codec.encode_request(name, service_name) invocation = Invocation(request) invocation_service = self._context.invocation_service - await invocation_service.ainvoke(invocation) + invocation_service.invoke(invocation) + invocation.future.result() return True except KeyError: return False def get_distributed_objects(self): - return to_list(self._proxies.values()) + return to_list(self._proxies.values()) \ No newline at end of file diff --git a/tests/integration/asyncio/authentication_tests/authentication_test.py b/tests/integration/asyncio/authentication_tests/authentication_test.py index 601ac5ef59..fbcb237c5b 100644 --- a/tests/integration/asyncio/authentication_tests/authentication_test.py +++ b/tests/integration/asyncio/authentication_tests/authentication_test.py @@ -21,8 +21,8 @@ class AuthenticationTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): current_directory = os.path.dirname(__file__) rc = None - hazelcast_token_xml = get_abs_path(current_directory, "hazelcast-token.xml") - hazelcast_userpass_xml = get_abs_path(current_directory, "hazelcast-user-pass.xml") + hazelcast_token_xml = get_abs_path(current_directory, "../../backward_compatible/authentication_tests/hazelcast-token.xml") + hazelcast_userpass_xml = get_abs_path(current_directory, "../../backward_compatible/authentication_tests/hazelcast-user-pass.xml") def setUp(self): self.rc = self.create_rc() diff --git a/tests/integration/asyncio/authentication_tests/hazelcast-token.xml b/tests/integration/asyncio/authentication_tests/hazelcast-token.xml deleted file mode 100644 index 196dff8bd2..0000000000 --- a/tests/integration/asyncio/authentication_tests/hazelcast-token.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - create - destroy - put - read - - - - - - - - - Hazelcast - - - - - \ No newline at end of file diff --git a/tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml b/tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml deleted file mode 100644 index 3f58bf9b2a..0000000000 --- a/tests/integration/asyncio/authentication_tests/hazelcast-user-pass.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - create - destroy - put - read - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/integration/asyncio/base.py b/tests/integration/asyncio/base.py index b6573bfede..d860f89929 100644 --- a/tests/integration/asyncio/base.py +++ b/tests/integration/asyncio/base.py @@ -1,6 +1,7 @@ import asyncio import logging 
import unittest +from typing import Awaitable from hazelcast.asyncio.client import HazelcastClient @@ -36,7 +37,7 @@ async def create_client(self, config=None): async def shutdown_all_clients(self): async with asyncio.TaskGroup() as tg: for c in self.clients: - await c.shutdown() + tg.create_task(c.shutdown()) self.clients = [] async def assertTrueEventually(self, assertion, timeout=30): @@ -44,7 +45,9 @@ async def assertTrueEventually(self, assertion, timeout=30): last_exception = None while get_current_timestamp() < timeout_time: try: - assertion() + maybe_awaitable = assertion() + if isinstance(maybe_awaitable, Awaitable): + await maybe_awaitable return except AssertionError as e: last_exception = e diff --git a/tests/integration/asyncio/proxy/hazelcast.xml b/tests/integration/asyncio/proxy/hazelcast.xml deleted file mode 100644 index c036e17d50..0000000000 --- a/tests/integration/asyncio/proxy/hazelcast.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - false - - - 5701 - - - 0 - - - - - com.hazelcast.client.test.IdentifiedFactory - - com.hazelcast.client.test.IdentifiedDataSerializableFactory - - - - com.hazelcast.client.test.PortableFactory - - - - - - 6 - - - 10 - - - 10 - 180 - - diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py index e7cf09ca39..a5858b86f4 100644 --- a/tests/integration/asyncio/proxy/map_test.py +++ b/tests/integration/asyncio/proxy/map_test.py @@ -51,12 +51,11 @@ ) from tests.util import ( event_collector, - fill_map, get_current_timestamp, compare_client_version, compare_server_version, skip_if_client_version_older_than, - random_string, + random_string, afill_map, ) @@ -106,7 +105,7 @@ class MapTest(SingleMemberTestCase): def configure_cluster(cls): path = os.path.abspath(__file__) dir_path = os.path.dirname(path) - with open(os.path.join(dir_path, "hazelcast.xml")) as f: + with open(os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast.xml")) as f: return f.read() @classmethod @@ -335,21 +334,6 @@ async def test_flush(self): await self.fill_map() await self.map.flush() - @unittest.skip - async def test_force_unlock(self): - async def force_unlock(): - await self.map.force_unlock("key") - - await self.map.put("key", "value") - await self.map.lock("key") - task = asyncio.create_task(force_unlock()) - - async def assertion(): - self.assertFalse(await self.map.is_locked("key")) - - await task - await self.assertTrueEventually(assertion) - async def test_get_all(self): expected = await self.fill_map(1000) actual = await self.map.get_all(list(expected.keys())) @@ -388,14 +372,6 @@ async def test_is_empty(self): await self.map.clear() self.assertTrue(await self.map.is_empty()) - async def test_is_locked(self): - await self.map.put("key", "value") - self.assertFalse(await self.map.is_locked("key")) - await self.map.lock("key") - self.assertTrue(await self.map.is_locked("key")) - await self.map.unlock("key") - self.assertFalse(await self.map.is_locked("key")) - async def test_key_set(self): keys = list((await self.fill_map()).keys()) self.assertCountEqual(await self.map.key_set(), keys) @@ -404,15 +380,6 @@ async def test_key_set_with_predicate(self): await self.fill_map() self.assertEqual(await self.map.key_set(sql("this == 'value-1'")), ["key-1"]) - @unittest.skip - async def test_lock(self): - await self.map.put("key", "value") - - t = self.start_new_thread(lambda: self.map.lock("key")) - t.join() - - self.assertFalse(await self.map.try_put("key", "new_value", timeout=0.01)) - async def test_put_all(self): m = 
{"key-%d" % x: "value-%d" % x for x in range(0, 1000)} await self.map.put_all(m) @@ -548,51 +515,6 @@ async def test_size(self): self.assertEqual(10, await self.map.size()) - async def test_try_lock_when_unlocked(self): - self.assertTrue(await self.map.try_lock("key")) - self.assertTrue(await self.map.is_locked("key")) - - @unittest.skip - async def test_try_lock_when_locked(self): - t = self.start_new_thread(lambda: self.map.lock("key")) - t.join() - self.assertFalse(await self.map.try_lock("key", timeout=0.1)) - - async def test_try_put_when_unlocked(self): - self.assertTrue(await self.map.try_put("key", "value")) - self.assertEqual(await self.map.get("key"), "value") - - @unittest.skip - async def test_try_put_when_locked(self): - t = self.start_new_thread(lambda: self.map.lock("key")) - t.join() - self.assertFalse(await self.map.try_put("key", "value", timeout=0.1)) - - @unittest.skip - async def test_try_remove_when_unlocked(self): - await self.map.put("key", "value") - self.assertTrue(await self.map.try_remove("key")) - self.assertIsNone(await self.map.get("key")) - - @unittest.skip - async def test_try_remove_when_locked(self): - await self.map.put("key", "value") - t = self.start_new_thread(lambda: self.map.lock("key")) - t.join() - self.assertFalse(await self.map.try_remove("key", timeout=0.1)) - - @unittest.skip - async def test_unlock(self): - await self.map.lock("key") - self.assertTrue(await self.map.is_locked("key")) - await self.map.unlock("key") - self.assertFalse(await self.map.is_locked("key")) - - @unittest.skip - async def test_unlock_when_no_lock(self): - with self.assertRaises(HazelcastError): - await self.map.unlock("key") - async def test_values(self): values = list((await self.fill_map()).values()) @@ -637,6 +559,7 @@ async def fill_map(self, count=10): class MapStoreTest(SingleMemberTestCase): + @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id @@ -646,315 +569,341 @@ def configure_client(cls, config): def configure_cluster(cls): path = os.path.abspath(__file__) dir_path = os.path.dirname(path) - with open(os.path.join(dir_path, "hazelcast_mapstore.xml")) as f: + with open(os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast_mapstore.xml")) as f: return f.read() - def setUp(self): - self.map = self.client.get_map("mapstore-test").blocking() - self.entries = fill_map(self.map, size=10, key_prefix="key", value_prefix="val") + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map("mapstore-test") + self.entries = await afill_map(self.map, size=10, key_prefix="key", value_prefix="val") - def tearDown(self): - self.map.destroy() + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() - def test_load_all_with_no_args_loads_all_keys(self): - self.map.evict_all() - self.map.load_all() - entry_set = self.map.get_all(self.entries.keys()) + async def test_load_all_with_no_args_loads_all_keys(self): + await self.map.evict_all() + await self.map.load_all() + entry_set = await self.map.get_all(self.entries.keys()) self.assertCountEqual(entry_set, self.entries) - def test_load_all_with_key_set_loads_given_keys(self): - self.map.evict_all() - self.map.load_all(["key0", "key1"]) - entry_set = self.map.get_all(["key0", "key1"]) + async def test_load_all_with_key_set_loads_given_keys(self): + await self.map.evict_all() + await self.map.load_all(["key0", "key1"]) + entry_set = await self.map.get_all(["key0", "key1"]) self.assertCountEqual(entry_set, 
{"key0": "val0", "key1": "val1"}) - def test_load_all_overrides_entries_in_memory_by_default(self): + async def test_load_all_overrides_entries_in_memory_by_default(self): self.map.evict_all() self.map.put_transient("key0", "new0") self.map.put_transient("key1", "new1") self.map.load_all(["key0", "key1"]) - entry_set = self.map.get_all(["key0", "key1"]) + entry_set = await self.map.get_all(["key0", "key1"]) self.assertCountEqual(entry_set, {"key0": "val0", "key1": "val1"}) - def test_load_all_with_replace_existing_false_does_not_override(self): - self.map.evict_all() - self.map.put_transient("key0", "new0") - self.map.put_transient("key1", "new1") - self.map.load_all(["key0", "key1"], replace_existing_values=False) - entry_set = self.map.get_all(["key0", "key1"]) + async def test_load_all_with_replace_existing_false_does_not_override(self): + await self.map.evict_all() + await self.map.put_transient("key0", "new0") + await self.map.put_transient("key1", "new1") + await self.map.load_all(["key0", "key1"], replace_existing_values=False) + entry_set = await self.map.get_all(["key0", "key1"]) self.assertCountEqual(entry_set, {"key0": "new0", "key1": "new1"}) - def test_evict(self): - self.map.evict("key0") - self.assertEqual(self.map.size(), 9) + async def test_evict(self): + await self.map.evict("key0") + self.assertEqual(await self.map.size(), 9) - def test_evict_non_existing_key(self): - self.map.evict("non_existing_key") - self.assertEqual(self.map.size(), 10) + async def test_evict_non_existing_key(self): + await self.map.evict("non_existing_key") + self.assertEqual(await self.map.size(), 10) - def test_evict_all(self): - self.map.evict_all() - self.assertEqual(self.map.size(), 0) + async def test_evict_all(self): + await self.map.evict_all() + self.assertEqual(await self.map.size(), 0) - def test_add_entry_listener_item_loaded(self): + async def test_add_entry_listener_item_loaded(self): collector = event_collector() - self.map.add_entry_listener(include_value=True, loaded_func=collector) - self.map.put("key", "value", ttl=0.1) + await self.map.add_entry_listener(include_value=True, loaded_func=collector) + await self.map.put("key", "value", ttl=0.1) time.sleep(2) - self.map.get("key") + await self.map.get("key") def assert_event(): self.assertEqual(len(collector.events), 1) event = collector.events[0] self.assertEntryEvent(event, key="key", value="value", event_type=EntryEventType.LOADED) - self.assertTrueEventually(assert_event, 10) + await self.assertTrueEventually(assert_event, 10) class MapTTLTest(SingleMemberTestCase): + @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id return config - def setUp(self): - self.map = self.client.get_map(random_string()).blocking() + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) - def tearDown(self): - self.map.destroy() + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() - def test_put_default_ttl(self): - self.map.put("key", "value") - time.sleep(1.0) - self.assertTrue(self.map.contains_key("key")) + async def test_put_default_ttl(self): + await self.map.put("key", "value") + await asyncio.sleep(1.0) + self.assertTrue(await self.map.contains_key("key")) - def test_put(self): - self.map.put("key", "value", 0.1) - self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + async def test_put(self): + async def assert_map_not_contains(): + self.assertFalse(await 
self.map.contains_key("key")) - def test_put_transient_default_ttl(self): - self.map.put_transient("key", "value") - time.sleep(1.0) - self.assertTrue(self.map.contains_key("key")) + await self.map.put("key", "value", 0.1) + await self.assertTrueEventually(lambda: assert_map_not_contains()) - def test_put_transient(self): - self.map.put_transient("key", "value", 0.1) - self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + async def test_put_transient_default_ttl(self): + await self.map.put_transient("key", "value") + await asyncio.sleep(1.0) + self.assertTrue(await self.map.contains_key("key")) - def test_put_if_absent_ttl(self): - self.map.put_if_absent("key", "value") - time.sleep(1.0) - self.assertTrue(self.map.contains_key("key")) + async def test_put_transient(self): + async def assert_map_not_contains(): + self.assertFalse(await self.map.contains_key("key")) + + await self.map.put_transient("key", "value", 0.1) + await self.assertTrueEventually(lambda: assert_map_not_contains()) + + async def test_put_if_absent_ttl(self): + await self.map.put_if_absent("key", "value") + await asyncio.sleep(1.0) + self.assertTrue(await self.map.contains_key("key")) + + async def test_put_if_absent(self): + async def assert_map_not_contains(): + self.assertFalse(await self.map.contains_key("key")) + + await self.map.put_if_absent("key", "value", 0.1) + await self.assertTrueEventually(lambda: assert_map_not_contains()) - def test_put_if_absent(self): - self.map.put_if_absent("key", "value", 0.1) - self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + async def test_set_default_ttl(self): + await self.map.set("key", "value") + await asyncio.sleep(1.0) + self.assertTrue(await self.map.contains_key("key")) - def test_set_default_ttl(self): - self.map.set("key", "value") - time.sleep(1.0) - self.assertTrue(self.map.contains_key("key")) + async def test_set(self): + async def assert_map_not_contains(): + self.assertFalse(await self.map.contains_key("key")) - def test_set(self): - self.map.set("key", "value", 0.1) - self.assertTrueEventually(lambda: self.assertFalse(self.map.contains_key("key"))) + await self.map.set("key", "value", 0.1) + await self.assertTrueEventually(lambda: assert_map_not_contains()) class MapMaxIdleTest(SingleMemberTestCase): + @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id return config - def setUp(self): - self.map = self.client.get_map(random_string()).blocking() + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) - def tearDown(self): - self.map.destroy() + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() - def test_put_default_max_idle(self): - self.map.put("key", "value") - time.sleep(1.0) - self.assertTrue(self.map.contains_key("key")) + async def test_put_default_max_idle(self): + await self.map.put("key", "value") + await asyncio.sleep(1.0) + self.assertTrue(await self.map.contains_key("key")) - def test_put(self): - self.map.put("key", "value", max_idle=0.1) - time.sleep(1.0) - self.assertFalse(self.map.contains_key("key")) + async def test_put(self): + await self.map.put("key", "value", max_idle=0.1) + await asyncio.sleep(1.0) + self.assertFalse(await self.map.contains_key("key")) - def test_put_transient_default_max_idle(self): - self.map.put_transient("key", "value") - time.sleep(1.0) - self.assertTrue(self.map.contains_key("key")) + async def 
test_put_transient_default_max_idle(self):
+        await self.map.put_transient("key", "value")
+        await asyncio.sleep(1.0)
+        self.assertTrue(await self.map.contains_key("key"))
 
-    def test_put_transient(self):
-        self.map.put_transient("key", "value", max_idle=0.1)
-        time.sleep(1.0)
-        self.assertFalse(self.map.contains_key("key"))
+    async def test_put_transient(self):
+        await self.map.put_transient("key", "value", max_idle=0.1)
+        await asyncio.sleep(1.0)
+        self.assertFalse(await self.map.contains_key("key"))
 
-    def test_put_if_absent_max_idle(self):
-        self.map.put_if_absent("key", "value")
-        time.sleep(1.0)
-        self.assertTrue(self.map.contains_key("key"))
+    async def test_put_if_absent_max_idle(self):
+        await self.map.put_if_absent("key", "value")
+        await asyncio.sleep(1.0)
+        self.assertTrue(await self.map.contains_key("key"))
 
-    def test_put_if_absent(self):
-        self.map.put_if_absent("key", "value", max_idle=0.1)
-        time.sleep(1.0)
-        self.assertFalse(self.map.contains_key("key"))
+    async def test_put_if_absent(self):
+        await self.map.put_if_absent("key", "value", max_idle=0.1)
+        await asyncio.sleep(1.0)
+        self.assertFalse(await self.map.contains_key("key"))
 
-    def test_set_default_ttl(self):
-        self.map.set("key", "value")
-        time.sleep(1.0)
-        self.assertTrue(self.map.contains_key("key"))
+    async def test_set_default_ttl(self):
+        await self.map.set("key", "value")
+        await asyncio.sleep(1.0)
+        self.assertTrue(await self.map.contains_key("key"))
 
-    def test_set(self):
-        self.map.set("key", "value", max_idle=0.1)
-        time.sleep(1.0)
-        self.assertFalse(self.map.contains_key("key"))
+    async def test_set(self):
+        await self.map.set("key", "value", max_idle=0.1)
+        await asyncio.sleep(1.0)
+        self.assertFalse(await self.map.contains_key("key"))
 
 
 @unittest.skipIf(
     compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client"
 )
 class MapAggregatorsIntTest(SingleMemberTestCase):
+
     @classmethod
     def configure_client(cls, config):
         config["cluster_name"] = cls.cluster.id
         config["default_int_type"] = IntType.INT
         return config
 
-    def setUp(self):
-        self.map = self.client.get_map(random_string()).blocking()
-        self.map.put_all({"key-%d" % i: i for i in range(50)})
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.map = await self.client.get_map(random_string())
+        await self.map.put_all({"key-%d" % i: i for i in range(50)})
 
-    def tearDown(self):
-        self.map.destroy()
+    async def asyncTearDown(self):
+        await self.map.destroy()
+        await super().asyncTearDown()
 
-    def test_aggregate_with_none_aggregator(self):
+    async def test_aggregate_with_none_aggregator(self):
         with self.assertRaises(AssertionError):
-            self.map.aggregate(None)
+            await self.map.aggregate(None)
 
-    def test_aggregate_with_paging_predicate(self):
+    async def test_aggregate_with_paging_predicate(self):
         with self.assertRaises(AssertionError):
-            self.map.aggregate(int_avg("foo"), paging(true(), 10))
+            await self.map.aggregate(int_avg("foo"), paging(true(), 10))
 
-    def test_int_average(self):
-        average = self.map.aggregate(int_avg())
+    async def test_int_average(self):
+        average = await self.map.aggregate(int_avg())
         self.assertEqual(24.5, average)
 
-    def test_int_average_with_attribute_path(self):
-        average = self.map.aggregate(int_avg("this"))
+    async def test_int_average_with_attribute_path(self):
+        average = await self.map.aggregate(int_avg("this"))
         self.assertEqual(24.5, average)
 
-    def test_int_average_with_predicate(self):
-        average = self.map.aggregate(int_avg(), greater_or_equal("this", 47))
+    async def
test_int_average_with_predicate(self): + average = await self.map.aggregate(int_avg(), greater_or_equal("this", 47)) self.assertEqual(48, average) - def test_int_sum(self): - sum_ = self.map.aggregate(int_sum()) + async def test_int_sum(self): + sum_ = await self.map.aggregate(int_sum()) self.assertEqual(1225, sum_) - def test_int_sum_with_attribute_path(self): - sum_ = self.map.aggregate(int_sum("this")) + async def test_int_sum_with_attribute_path(self): + sum_ = await self.map.aggregate(int_sum("this")) self.assertEqual(1225, sum_) - def test_int_sum_with_predicate(self): - sum_ = self.map.aggregate(int_sum(), greater_or_equal("this", 47)) + async def test_int_sum_with_predicate(self): + sum_ = await self.map.aggregate(int_sum(), greater_or_equal("this", 47)) self.assertEqual(144, sum_) - def test_fixed_point_sum(self): - sum_ = self.map.aggregate(fixed_point_sum()) + async def test_fixed_point_sum(self): + sum_ = await self.map.aggregate(fixed_point_sum()) self.assertEqual(1225, sum_) - def test_fixed_point_sum_with_attribute_path(self): - sum_ = self.map.aggregate(fixed_point_sum("this")) + async def test_fixed_point_sum_with_attribute_path(self): + sum_ = await self.map.aggregate(fixed_point_sum("this")) self.assertEqual(1225, sum_) - def test_fixed_point_sum_with_predicate(self): - sum_ = self.map.aggregate(fixed_point_sum(), greater_or_equal("this", 47)) + async def test_fixed_point_sum_with_predicate(self): + sum_ = await self.map.aggregate(fixed_point_sum(), greater_or_equal("this", 47)) self.assertEqual(144, sum_) - def test_distinct(self): - self._fill_with_duplicate_values() - distinct_values = self.map.aggregate(distinct()) + async def test_distinct(self): + await self._fill_with_duplicate_values() + distinct_values = await self.map.aggregate(distinct()) self.assertEqual(set(range(50)), distinct_values) - def test_distinct_with_attribute_path(self): - self._fill_with_duplicate_values() - distinct_values = self.map.aggregate(distinct("this")) + async def test_distinct_with_attribute_path(self): + await self._fill_with_duplicate_values() + distinct_values = await self.map.aggregate(distinct("this")) self.assertEqual(set(range(50)), distinct_values) - def test_distinct_with_predicate(self): - self._fill_with_duplicate_values() - distinct_values = self.map.aggregate(distinct(), greater_or_equal("this", 10)) + async def test_distinct_with_predicate(self): + await self._fill_with_duplicate_values() + distinct_values = await self.map.aggregate(distinct(), greater_or_equal("this", 10)) self.assertEqual(set(range(10, 50)), distinct_values) - def test_max_by(self): - max_item = self.map.aggregate(max_by("this")) + async def test_max_by(self): + max_item = await self.map.aggregate(max_by("this")) self.assertEqual("key-49", max_item.key) self.assertEqual(49, max_item.value) - def test_max_by_with_predicate(self): - max_item = self.map.aggregate(max_by("this"), less_or_equal("this", 10)) + async def test_max_by_with_predicate(self): + max_item = await self.map.aggregate(max_by("this"), less_or_equal("this", 10)) self.assertEqual("key-10", max_item.key) self.assertEqual(10, max_item.value) - def test_min_by(self): - min_item = self.map.aggregate(min_by("this")) + async def test_min_by(self): + min_item = await self.map.aggregate(min_by("this")) self.assertEqual("key-0", min_item.key) self.assertEqual(0, min_item.value) - def test_min_by_with_predicate(self): - min_item = self.map.aggregate(min_by("this"), greater_or_equal("this", 10)) + async def test_min_by_with_predicate(self): + 
min_item = await self.map.aggregate(min_by("this"), greater_or_equal("this", 10)) self.assertEqual("key-10", min_item.key) self.assertEqual(10, min_item.value) - def _fill_with_duplicate_values(self): + async def _fill_with_duplicate_values(self): # Map is initially filled with key-i: i mappings from [0, 50). # Add more values with different keys but the same values to # test the behaviour of the distinct aggregator. - self.map.put_all({"different-key-%d" % i: i for i in range(50)}) + await self.map.put_all({"different-key-%d" % i: i for i in range(50)}) @unittest.skipIf( compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" ) class MapAggregatorsLongTest(SingleMemberTestCase): + @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id config["default_int_type"] = IntType.LONG return config - def setUp(self): - self.map = self.client.get_map(random_string()).blocking() - self.map.put_all({"key-%d" % i: i for i in range(50)}) + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) + await self.map.put_all({"key-%d" % i: i for i in range(50)}) - def tearDown(self): - self.map.destroy() + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() - def test_long_average(self): - average = self.map.aggregate(long_avg()) + async def test_long_average(self): + average = await self.map.aggregate(long_avg()) self.assertEqual(24.5, average) - def test_long_average_with_attribute_path(self): - average = self.map.aggregate(long_avg("this")) + async def test_long_average_with_attribute_path(self): + average = await self.map.aggregate(long_avg("this")) self.assertEqual(24.5, average) - def test_long_average_with_predicate(self): - average = self.map.aggregate(long_avg(), greater_or_equal("this", 47)) + async def test_long_average_with_predicate(self): + average = await self.map.aggregate(long_avg(), greater_or_equal("this", 47)) self.assertEqual(48, average) - def test_long_sum(self): - sum_ = self.map.aggregate(long_sum()) + async def test_long_sum(self): + sum_ = await self.map.aggregate(long_sum()) self.assertEqual(1225, sum_) - def test_long_sum_with_attribute_path(self): - sum_ = self.map.aggregate(long_sum("this")) + async def test_long_sum_with_attribute_path(self): + sum_ = await self.map.aggregate(long_sum("this")) self.assertEqual(1225, sum_) - def test_long_sum_with_predicate(self): - sum_ = self.map.aggregate(long_sum(), greater_or_equal("this", 47)) + async def test_long_sum_with_predicate(self): + sum_ = await self.map.aggregate(long_sum(), greater_or_equal("this", 47)) self.assertEqual(144, sum_) @@ -967,95 +916,97 @@ def configure_client(cls, config): config["cluster_name"] = cls.cluster.id return config - def setUp(self): - self.map = self.client.get_map(random_string()).blocking() - self.map.put_all({"key-%d" % i: float(i) for i in range(50)}) + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) + await self.map.put_all({"key-%d" % i: float(i) for i in range(50)}) - def tearDown(self): - self.map.destroy() + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() - def test_count(self): - count_ = self.map.aggregate(count()) + async def test_count(self): + count_ = await self.map.aggregate(count()) self.assertEqual(50, count_) - def test_count_with_attribute_path(self): - count_ = self.map.aggregate(count("this")) + async def 
test_count_with_attribute_path(self): + count_ = await self.map.aggregate(count("this")) self.assertEqual(50, count_) - def test_count_with_predicate(self): - count_ = self.map.aggregate(count(), greater_or_equal("this", 1)) + async def test_count_with_predicate(self): + count_ = await self.map.aggregate(count(), greater_or_equal("this", 1)) self.assertEqual(49, count_) - def test_double_average(self): - average = self.map.aggregate(double_avg()) + async def test_double_average(self): + average = await self.map.aggregate(double_avg()) self.assertEqual(24.5, average) - def test_double_average_with_attribute_path(self): - average = self.map.aggregate(double_avg("this")) + async def test_double_average_with_attribute_path(self): + average = await self.map.aggregate(double_avg("this")) self.assertEqual(24.5, average) - def test_double_average_with_predicate(self): - average = self.map.aggregate(double_avg(), greater_or_equal("this", 47)) + async def test_double_average_with_predicate(self): + average = await self.map.aggregate(double_avg(), greater_or_equal("this", 47)) self.assertEqual(48, average) - def test_double_sum(self): - sum_ = self.map.aggregate(double_sum()) + async def test_double_sum(self): + sum_ = await self.map.aggregate(double_sum()) self.assertEqual(1225, sum_) - def test_double_sum_with_attribute_path(self): - sum_ = self.map.aggregate(double_sum("this")) + async def test_double_sum_with_attribute_path(self): + sum_ = await self.map.aggregate(double_sum("this")) self.assertEqual(1225, sum_) - def test_double_sum_with_predicate(self): - sum_ = self.map.aggregate(double_sum(), greater_or_equal("this", 47)) + async def test_double_sum_with_predicate(self): + sum_ = await self.map.aggregate(double_sum(), greater_or_equal("this", 47)) self.assertEqual(144, sum_) - def test_floating_point_sum(self): - sum_ = self.map.aggregate(floating_point_sum()) + async def test_floating_point_sum(self): + sum_ = await self.map.aggregate(floating_point_sum()) self.assertEqual(1225, sum_) - def test_floating_point_sum_with_attribute_path(self): - sum_ = self.map.aggregate(floating_point_sum("this")) + async def test_floating_point_sum_with_attribute_path(self): + sum_ = await self.map.aggregate(floating_point_sum("this")) self.assertEqual(1225, sum_) - def test_floating_point_sum_with_predicate(self): - sum_ = self.map.aggregate(floating_point_sum(), greater_or_equal("this", 47)) + async def test_floating_point_sum_with_predicate(self): + sum_ = await self.map.aggregate(floating_point_sum(), greater_or_equal("this", 47)) self.assertEqual(144, sum_) - def test_number_avg(self): - average = self.map.aggregate(number_avg()) + async def test_number_avg(self): + average = await self.map.aggregate(number_avg()) self.assertEqual(24.5, average) - def test_number_avg_with_attribute_path(self): - average = self.map.aggregate(number_avg("this")) + async def test_number_avg_with_attribute_path(self): + average = await self.map.aggregate(number_avg("this")) self.assertEqual(24.5, average) - def test_number_avg_with_predicate(self): - average = self.map.aggregate(number_avg(), greater_or_equal("this", 47)) + async def test_number_avg_with_predicate(self): + average = await self.map.aggregate(number_avg(), greater_or_equal("this", 47)) self.assertEqual(48, average) - def test_max(self): - average = self.map.aggregate(max_()) + async def test_max(self): + average = await self.map.aggregate(max_()) self.assertEqual(49, average) - def test_max_with_attribute_path(self): - average = 
self.map.aggregate(max_("this")) + async def test_max_with_attribute_path(self): + average = await self.map.aggregate(max_("this")) self.assertEqual(49, average) - def test_max_with_predicate(self): - average = self.map.aggregate(max_(), less_or_equal("this", 3)) + async def test_max_with_predicate(self): + average = await self.map.aggregate(max_(), less_or_equal("this", 3)) self.assertEqual(3, average) - def test_min(self): - average = self.map.aggregate(min_()) + async def test_min(self): + average = await self.map.aggregate(min_()) self.assertEqual(0, average) - def test_min_with_attribute_path(self): - average = self.map.aggregate(min_("this")) + async def test_min_with_attribute_path(self): + average = await self.map.aggregate(min_("this")) self.assertEqual(0, average) - def test_min_with_predicate(self): - average = self.map.aggregate(min_(), greater_or_equal("this", 3)) + async def test_min_with_predicate(self): + average = await self.map.aggregate(min_(), greater_or_equal("this", 3)) self.assertEqual(3, average) @@ -1068,43 +1019,45 @@ def configure_client(cls, config): config["cluster_name"] = cls.cluster.id return config - def setUp(self): - self.map = self.client.get_map(random_string()).blocking() - self.map.put(1, HazelcastJsonValue('{"attr1": 1, "attr2": 2, "attr3": 3}')) - self.map.put(2, HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}')) + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) + await self.map.put(1, HazelcastJsonValue('{"attr1": 1, "attr2": 2, "attr3": 3}')) + await self.map.put(2, HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}')) - def tearDown(self): - self.map.destroy() + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() - def test_project_with_none_projection(self): + async def test_project_with_none_projection(self): with self.assertRaises(AssertionError): - self.map.project(None) + await self.map.project(None) - def test_project_with_paging_predicate(self): + async def test_project_with_paging_predicate(self): with self.assertRaises(AssertionError): - self.map.project(single_attribute("foo"), paging(true(), 10)) + await self.map.project(single_attribute("foo"), paging(true(), 10)) - def test_single_attribute(self): - attributes = self.map.project(single_attribute("attr1")) + async def test_single_attribute(self): + attributes = await self.map.project(single_attribute("attr1")) self.assertCountEqual([1, 4], attributes) - def test_single_attribute_with_predicate(self): - attributes = self.map.project(single_attribute("attr1"), greater_or_equal("attr1", 4)) + async def test_single_attribute_with_predicate(self): + attributes = await self.map.project(single_attribute("attr1"), greater_or_equal("attr1", 4)) self.assertCountEqual([4], attributes) - def test_multi_attribute(self): - attributes = self.map.project(multi_attribute("attr1", "attr2")) + async def test_multi_attribute(self): + attributes = await self.map.project(multi_attribute("attr1", "attr2")) self.assertCountEqual([[1, 2], [4, 5]], attributes) - def test_multi_attribute_with_predicate(self): - attributes = self.map.project( + async def test_multi_attribute_with_predicate(self): + attributes = await self.map.project( multi_attribute("attr1", "attr2"), greater_or_equal("attr2", 3), ) self.assertCountEqual([[4, 5]], attributes) - def test_identity(self): - attributes = self.map.project(identity()) + async def test_identity(self): + attributes = await self.map.project(identity()) 
self.assertCountEqual( [ HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}'), @@ -1113,8 +1066,8 @@ def test_identity(self): [attribute.value for attribute in attributes], ) - def test_identity_with_predicate(self): - attributes = self.map.project(identity(), greater_or_equal("attr2", 3)) + async def test_identity_with_predicate(self): + attributes = await self.map.project(identity(), greater_or_equal("attr2", 3)) self.assertCountEqual( [HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}')], [attribute.value for attribute in attributes], diff --git a/tests/util.py b/tests/util.py index 6492f2cd7f..e6b772eb1f 100644 --- a/tests/util.py +++ b/tests/util.py @@ -37,6 +37,13 @@ def fill_map(map, size=10, key_prefix="key", value_prefix="val"): map.put_all(entries) return entries +async def afill_map(map, size=10, key_prefix="key", value_prefix="val"): + import asyncio + entries = dict() + for i in range(size): + entries[key_prefix + str(i)] = value_prefix + str(i) + await map.put_all(entries) + return entries def get_ssl_config( cluster_name, From 9931956b7526867fb4e35f481da5ce22c50ae3d9 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 19:45:58 +0300 Subject: [PATCH 03/51] Updates --- hazelcast/proxy/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hazelcast/proxy/__init__.py b/hazelcast/proxy/__init__.py index 14979c3ddc..1e6d98abe2 100644 --- a/hazelcast/proxy/__init__.py +++ b/hazelcast/proxy/__init__.py @@ -88,4 +88,4 @@ def destroy_proxy(self, service_name, name, destroy_on_remote=True): return False def get_distributed_objects(self): - return to_list(self._proxies.values()) \ No newline at end of file + return to_list(self._proxies.values()) From 35384bffd68e9e571eee3d76453b3f68cc7de9e8 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 20:01:48 +0300 Subject: [PATCH 04/51] Black --- hazelcast/asyncio/client.py | 19 +++--- hazelcast/internal/asyncio_cluster.py | 4 +- hazelcast/internal/asyncio_connection.py | 23 +++++-- hazelcast/internal/asyncio_listener.py | 15 +++-- hazelcast/internal/asyncio_proxy/base.py | 16 +++-- hazelcast/internal/asyncio_proxy/map.py | 21 ++++-- hazelcast/internal/asyncio_reactor.py | 66 +++++++++++++++---- start_rc.py | 4 +- .../authentication_test.py | 16 +++-- tests/integration/asyncio/client_test.py | 1 - tests/integration/asyncio/proxy/map_test.py | 18 ++--- tests/util.py | 3 + 12 files changed, 149 insertions(+), 57 deletions(-) diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index e54aa14cff..de9d1ef80b 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -27,7 +27,7 @@ ) from hazelcast.internal.asyncio_proxy.base import Proxy from hazelcast.internal.asyncio_proxy.map import Map -from hazelcast.internal.asyncio_reactor import AsyncioReactor +from hazelcast.internal.asyncio_reactor import AsyncioReactor from hazelcast.serialization import SerializationServiceV1 from hazelcast.sql import SqlService, _InternalSqlService from hazelcast.statistics import Statistics @@ -231,7 +231,6 @@ async def get_map(self, name: str) -> Map[KeyType, ValueType]: """ return await self._proxy_manager.get_or_create(MAP_SERVICE, name) - async def add_distributed_object_listener( self, listener_func: typing.Callable[[DistributedObjectEvent], None] ) -> str: @@ -299,15 +298,19 @@ async def get_distributed_objects(self) -> typing.List[Proxy]: async with asyncio.TaskGroup() as tg: for dist_obj_info in response: local_distributed_object_infos.discard(dist_obj_info) - 
tg.create_task(self._proxy_manager.get_or_create( - dist_obj_info.service_name, dist_obj_info.name, create_on_remote=False - )) + tg.create_task( + self._proxy_manager.get_or_create( + dist_obj_info.service_name, dist_obj_info.name, create_on_remote=False + ) + ) async with asyncio.TaskGroup() as tg: for dist_obj_info in local_distributed_object_infos: - tg.create_task(self._proxy_manager.destroy_proxy( - dist_obj_info.service_name, dist_obj_info.name, destroy_on_remote=False - )) + tg.create_task( + self._proxy_manager.destroy_proxy( + dist_obj_info.service_name, dist_obj_info.name, destroy_on_remote=False + ) + ) return self._proxy_manager.get_distributed_objects() diff --git a/hazelcast/internal/asyncio_cluster.py b/hazelcast/internal/asyncio_cluster.py index 28bb024964..4ce2491b65 100644 --- a/hazelcast/internal/asyncio_cluster.py +++ b/hazelcast/internal/asyncio_cluster.py @@ -203,7 +203,9 @@ async def wait_initial_member_list_fetched(self): IllegalStateError: If the member list could not be fetched """ try: - await asyncio.wait_for(self._initial_list_fetched.wait(), _INITIAL_MEMBERS_TIMEOUT_SECONDS) + await asyncio.wait_for( + self._initial_list_fetched.wait(), _INITIAL_MEMBERS_TIMEOUT_SECONDS + ) except TimeoutError: raise IllegalStateError("Could not get initial member list from cluster!") diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index 47b4db05d0..4a863f7fe9 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -284,7 +284,9 @@ async def shutdown(self): # Need to create copy of connection values to avoid modification errors on runtime async with asyncio.TaskGroup() as tg: for connection in list(self.active_connections.values()): - tg.create_task(connection.close_connection("Hazelcast client is shutting down", None)) + tg.create_task( + connection.close_connection("Hazelcast client is shutting down", None) + ) self.active_connections.clear() del self._connection_listeners[:] @@ -395,7 +397,7 @@ async def _get_or_connect_to_member(self, member): translated = self._translate_member_address(member) connection = await self._create_connection(translated) - response = await self._authenticate(connection) #.continue_with(self._on_auth, connection) + response = await self._authenticate(connection) # .continue_with(self._on_auth, connection) await self._on_auth(response, connection) return connection @@ -479,9 +481,13 @@ async def run(): for item in member_uuids: connecting_uuids.discard(item) - self._connect_all_members_timer = self._reactor.add_timer(1, lambda: asyncio.create_task(run())) + self._connect_all_members_timer = self._reactor.add_timer( + 1, lambda: asyncio.create_task(run()) + ) - self._connect_all_members_timer = self._reactor.add_timer(1, lambda: asyncio.create_task(run())) + self._connect_all_members_timer = self._reactor.add_timer( + 1, lambda: asyncio.create_task(run()) + ) async def _connect_to_cluster(self): await self._sync_connect_to_cluster() @@ -805,7 +811,6 @@ def _get_possible_addresses(self): class HeartbeatManager: - def __init__(self, connection_manager, client, config, reactor, invocation_service): self._connection_manager = connection_manager self._client = client @@ -827,9 +832,13 @@ async def _heartbeat(): async with asyncio.TaskGroup() as tg: for connection in list(conn_manager.active_connections.values()): tg.create_task(self._check_connection(now, connection)) - self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, lambda: 
asyncio.create_task(_heartbeat())) + self._heartbeat_timer = self._reactor.add_timer( + self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat()) + ) - self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat())) + self._heartbeat_timer = self._reactor.add_timer( + self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat()) + ) def shutdown(self): """Stops HeartBeat operations.""" diff --git a/hazelcast/internal/asyncio_listener.py b/hazelcast/internal/asyncio_listener.py index 5eb818a791..dbd04956ea 100644 --- a/hazelcast/internal/asyncio_listener.py +++ b/hazelcast/internal/asyncio_listener.py @@ -76,14 +76,15 @@ async def register_listener( try: async with asyncio.TaskGroup() as tg: for connection in list(self._connection_manager.active_connections.values()): - task = self._register_on_connection(registration_id, registration, connection) + task = self._register_on_connection( + registration_id, registration, connection + ) tg.create_task(task) return registration_id except Exception: await self.deregister_listener(registration_id) raise HazelcastError("Listener cannot be added") - async def deregister_listener(self, user_registration_id): check_not_none(user_registration_id, "None user_registration_id is not allowed!") async with self._registration_lock: @@ -96,7 +97,7 @@ async def handle(inv: Invocation, conn: AsyncioConnection): await inv.future except Exception as e: if not isinstance( - e, (HazelcastClientNotActiveError, IOError, TargetDisconnectedError) + e, (HazelcastClientNotActiveError, IOError, TargetDisconnectedError) ): _logger.warning( "Deregistration of listener with ID %s has failed for address %s: %s", @@ -170,7 +171,9 @@ def add_event_handler(self, correlation_id, event_handler): def remove_event_handler(self, correlation_id): self._event_handlers.pop(correlation_id, None) - async def _register_on_connection(self, user_registration_id, listener_registration, connection): + async def _register_on_connection( + self, user_registration_id, listener_registration, connection + ): registration_map = listener_registration.connection_registrations if connection in registration_map: @@ -209,7 +212,9 @@ async def _connection_added(self, connection): async with self._registration_lock: async with asyncio.TaskGroup() as tg: for user_reg_id, listener_registration in self._active_registrations.items(): - task = self._register_on_connection(user_reg_id, listener_registration, connection) + task = self._register_on_connection( + user_reg_id, listener_registration, connection + ) tg.create_task(task) async def _connection_removed(self, connection): diff --git a/hazelcast/internal/asyncio_proxy/base.py b/hazelcast/internal/asyncio_proxy/base.py index 861109636e..60fb8de4ac 100644 --- a/hazelcast/internal/asyncio_proxy/base.py +++ b/hazelcast/internal/asyncio_proxy/base.py @@ -55,12 +55,16 @@ def _invoke(self, request, response_handler=_no_op_response_handler) -> asyncio. 
self._invocation_service.invoke(invocation) return invocation.future - def _invoke_on_target(self, request, uuid, response_handler=_no_op_response_handler) -> asyncio.Future: + def _invoke_on_target( + self, request, uuid, response_handler=_no_op_response_handler + ) -> asyncio.Future: invocation = Invocation(request, uuid=uuid, response_handler=response_handler) self._invocation_service.invoke(invocation) return invocation.future - def _invoke_on_key(self, request, key_data, response_handler=_no_op_response_handler) -> asyncio.Future: + def _invoke_on_key( + self, request, key_data, response_handler=_no_op_response_handler + ) -> asyncio.Future: partition_id = self._partition_service.get_partition_id(key_data) invocation = Invocation( request, partition_id=partition_id, response_handler=response_handler @@ -68,14 +72,18 @@ def _invoke_on_key(self, request, key_data, response_handler=_no_op_response_han self._invocation_service.invoke(invocation) return invocation.future - def _invoke_on_partition(self, request, partition_id, response_handler=_no_op_response_handler) -> asyncio.Future: + def _invoke_on_partition( + self, request, partition_id, response_handler=_no_op_response_handler + ) -> asyncio.Future: invocation = Invocation( request, partition_id=partition_id, response_handler=response_handler ) self._invocation_service.invoke(invocation) return invocation.future - async def _ainvoke_on_partition(self, request, partition_id, response_handler=_no_op_response_handler) -> typing.Any: + async def _ainvoke_on_partition( + self, request, partition_id, response_handler=_no_op_response_handler + ) -> typing.Any: fut = self._invoke_on_partition(request, partition_id, response_handler) return await fut diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index e3e4762413..ea0765fc86 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -90,7 +90,6 @@ class Map(Proxy, typing.Generic[KeyType, ValueType]): - def __init__(self, service_name, name, context): super(Map, self).__init__(service_name, name, context) self._reference_id_generator = context.lock_reference_id_generator @@ -243,6 +242,7 @@ def handle_event_entry( expired_func(event) elif event.event_type == EntryEventType.LOADED: loaded_func(event) + return await self._register_listener( request, lambda r: response_decoder(r), @@ -372,6 +372,7 @@ def handler(message): request = map_entries_with_predicate_codec.encode_request(self.name, predicate_data) else: + def handler(message): entry_data_list = map_entry_set_codec.decode_response(message) return deserialize_entry_list_in_place(entry_data_list, self._to_object) @@ -548,6 +549,7 @@ def handler(message): request = map_key_set_with_predicate_codec.encode_request(self.name, predicate_data) else: + def handler(message): data_list = map_key_set_codec.decode_response(message) return deserialize_list_in_place(data_list, self._to_object) @@ -563,7 +565,9 @@ async def load_all( try: key_data_list = [self._to_data(key) for key in keys] except SchemaNotReplicatedError as e: - return await self._send_schema_and_retry(e, self.load_all, keys, replace_existing_values) + return await self._send_schema_and_retry( + e, self.load_all, keys, replace_existing_values + ) return await self._load_all_internal(key_data_list, replace_existing_values) @@ -653,7 +657,9 @@ async def put_if_absent( key_data = self._to_data(key) value_data = self._to_data(value) except SchemaNotReplicatedError as e: - return await 
self._send_schema_and_retry(e, self.put_if_absent, key, value, ttl, max_idle) + return await self._send_schema_and_retry( + e, self.put_if_absent, key, value, ttl, max_idle + ) return await self._put_if_absent_internal(key_data, value_data, ttl, max_idle) @@ -666,7 +672,9 @@ async def put_transient( key_data = self._to_data(key) value_data = self._to_data(value) except SchemaNotReplicatedError as e: - return await self._send_schema_and_retry(e, self.put_transient, key, value, ttl, max_idle) + return await self._send_schema_and_retry( + e, self.put_transient, key, value, ttl, max_idle + ) return await self._put_transient_internal(key_data, value_data, ttl, max_idle) @@ -727,7 +735,9 @@ async def replace_if_same( old_value_data = self._to_data(old_value) new_value_data = self._to_data(new_value) except SchemaNotReplicatedError as e: - return await self._send_schema_and_retry(e, self.replace_if_same, key, old_value, new_value) + return await self._send_schema_and_retry( + e, self.replace_if_same, key, old_value, new_value + ) return await self._replace_if_same_internal(key_data, old_value_data, new_value_data) @@ -805,6 +815,7 @@ def handler(message): request = map_values_with_predicate_codec.encode_request(self.name, predicate_data) else: + def handler(message): data_list = map_values_codec.decode_response(message) return deserialize_list_in_place(data_list, self._to_object) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index b3082985f5..d0293cd2f1 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -4,7 +4,7 @@ import time from asyncio import AbstractEventLoop, transports -from hazelcast.internal.asyncio_connection import Connection +from hazelcast.internal.asyncio_connection import Connection from hazelcast.core import Address _BUFFER_SIZE = 128000 @@ -14,7 +14,6 @@ class AsyncioReactor: - def __init__(self, loop: AbstractEventLoop | None = None): self._is_live = False self._loop = loop or asyncio.get_running_loop() @@ -37,7 +36,13 @@ async def connection_factory( self, connection_manager, connection_id, address: Address, network_config, message_callback ): return await AsyncioConnection.create_and_connect( - self._loop, self, connection_manager, connection_id, address, network_config, message_callback, + self._loop, + self, + connection_manager, + connection_id, + address, + network_config, + message_callback, ) def update_bytes_sent(self, sent: int): @@ -54,8 +59,16 @@ def update_bytes_received(self, received: int): class AsyncioConnection(Connection): - - def __init__(self, loop, reactor: AsyncioReactor, connection_manager, connection_id, address, config, message_callback): + def __init__( + self, + loop, + reactor: AsyncioReactor, + connection_manager, + connection_id, + address, + config, + message_callback, + ): super().__init__(connection_manager, connection_id, message_callback) self._loop = loop self._reactor = reactor @@ -64,8 +77,19 @@ def __init__(self, loop, reactor: AsyncioReactor, connection_manager, connection self._proto = None @classmethod - async def create_and_connect(cls, loop, reactor: AsyncioReactor, connection_manager, connection_id, address, config, message_callback): - this = cls(loop, reactor, connection_manager, connection_id, address, config, message_callback) + async def create_and_connect( + cls, + loop, + reactor: AsyncioReactor, + connection_manager, + connection_id, + address, + config, + message_callback, + ): + this = cls( + loop, reactor, connection_manager, 
connection_id, address, config, message_callback + ) if this._config.ssl_enabled: await this._create_ssl_connection() else: @@ -73,12 +97,21 @@ async def create_and_connect(cls, loop, reactor: AsyncioReactor, connection_mana return this def _create_protocol(self): - return HazelcastProtocol(self._loop, self._reader, self._address, self._update_read_time, - self._update_write_time, self._update_sent, self._update_received) + return HazelcastProtocol( + self._loop, + self._reader, + self._address, + self._update_read_time, + self._update_write_time, + self._update_sent, + self._update_received, + ) async def _create_connection(self): loop = self._loop - res = await loop.create_connection(self._create_protocol, host=self._address.host, port=self._address.port) + res = await loop.create_connection( + self._create_protocol, host=self._address.host, port=self._address.port + ) _sock, self._proto = res async def _create_ssl_connection(self): @@ -107,7 +140,16 @@ class HazelcastProtocol(asyncio.BufferedProtocol): PROTOCOL_STARTER = b"CP2" - def __init__(self, loop: AbstractEventLoop, reader, address, update_read_time, update_write_time, update_sent, update_received): + def __init__( + self, + loop: AbstractEventLoop, + reader, + address, + update_read_time, + update_write_time, + update_sent, + update_received, + ): self._loop = loop self._reader = reader self._address = address @@ -161,7 +203,7 @@ def _do_write(self): if not self._write_buf_size: return buf_bytes = self._write_buf.getvalue() - self._transport.write(buf_bytes[:self._write_buf_size]) + self._transport.write(buf_bytes[: self._write_buf_size]) self._update_write_time(time.time()) self._update_sent(self._write_buf_size) self._write_buf.seek(0) diff --git a/start_rc.py b/start_rc.py index 5daef64ad3..403f3576c7 100644 --- a/start_rc.py +++ b/start_rc.py @@ -63,7 +63,9 @@ def download_if_necessary(repo, group, artifact_id, version, is_test_artifact=Fa def start_rc(stdout=None, stderr=None): artifacts = [] - rc = download_if_necessary(ENTERPRISE_SNAPSHOT_REPO, HAZELCAST_GROUP, "hazelcast-remote-controller", RC_VERSION) + rc = download_if_necessary( + ENTERPRISE_SNAPSHOT_REPO, HAZELCAST_GROUP, "hazelcast-remote-controller", RC_VERSION + ) tests = download_if_necessary(REPO, HAZELCAST_GROUP, "hazelcast", SERVER_VERSION, True) sql = download_if_necessary(REPO, HAZELCAST_GROUP, "hazelcast-sql", SERVER_VERSION) diff --git a/tests/integration/asyncio/authentication_tests/authentication_test.py b/tests/integration/asyncio/authentication_tests/authentication_test.py index fbcb237c5b..97c29b346e 100644 --- a/tests/integration/asyncio/authentication_tests/authentication_test.py +++ b/tests/integration/asyncio/authentication_tests/authentication_test.py @@ -21,8 +21,12 @@ class AuthenticationTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): current_directory = os.path.dirname(__file__) rc = None - hazelcast_token_xml = get_abs_path(current_directory, "../../backward_compatible/authentication_tests/hazelcast-token.xml") - hazelcast_userpass_xml = get_abs_path(current_directory, "../../backward_compatible/authentication_tests/hazelcast-user-pass.xml") + hazelcast_token_xml = get_abs_path( + current_directory, "../../backward_compatible/authentication_tests/hazelcast-token.xml" + ) + hazelcast_userpass_xml = get_abs_path( + current_directory, "../../backward_compatible/authentication_tests/hazelcast-user-pass.xml" + ) def setUp(self): self.rc = self.create_rc() @@ -35,14 +39,18 @@ async def test_no_auth(self): cluster.start_member() with 
self.assertRaises(HazelcastError): - await HazelcastClient.create_and_start(cluster_name=cluster.id, cluster_connect_timeout=2) + await HazelcastClient.create_and_start( + cluster_name=cluster.id, cluster_connect_timeout=2 + ) async def test_token_auth(self): cluster = self.create_cluster(self.rc, self.configure_cluster(self.hazelcast_token_xml)) cluster.start_member() token_provider = BasicTokenProvider("Hazelcast") - client = await HazelcastClient.create_and_start(cluster_name=cluster.id, token_provider=token_provider) + client = await HazelcastClient.create_and_start( + cluster_name=cluster.id, token_provider=token_provider + ) self.assertTrue(client.lifecycle_service.is_running()) await client.shutdown() diff --git a/tests/integration/asyncio/client_test.py b/tests/integration/asyncio/client_test.py index db15c28140..8857bac69b 100644 --- a/tests/integration/asyncio/client_test.py +++ b/tests/integration/asyncio/client_test.py @@ -14,7 +14,6 @@ class ClientLabelsTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): - @classmethod def setUpClass(cls): cls.rc = cls.create_rc() diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py index a5858b86f4..dc92a3a052 100644 --- a/tests/integration/asyncio/proxy/map_test.py +++ b/tests/integration/asyncio/proxy/map_test.py @@ -55,7 +55,8 @@ compare_client_version, compare_server_version, skip_if_client_version_older_than, - random_string, afill_map, + random_string, + afill_map, ) @@ -306,7 +307,9 @@ async def test_execute_on_entries_with_predicate(self): m = await self.fill_map() expected_entry_set = [(key, "processed") if key < "key-5" else (key, m[key]) for key in m] expected_values = [(key, "processed") for key in m if key < "key-5"] - values = await self.map.execute_on_entries(EntryProcessor("processed"), sql("__key < 'key-5'")) + values = await self.map.execute_on_entries( + EntryProcessor("processed"), sql("__key < 'key-5'") + ) self.assertCountEqual(expected_entry_set, await self.map.entry_set()) self.assertCountEqual(expected_values, values) @@ -559,7 +562,6 @@ async def fill_map(self, count=10): class MapStoreTest(SingleMemberTestCase): - @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id @@ -569,7 +571,9 @@ def configure_client(cls, config): def configure_cluster(cls): path = os.path.abspath(__file__) dir_path = os.path.dirname(path) - with open(os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast_mapstore.xml")) as f: + with open( + os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast_mapstore.xml") + ) as f: return f.read() async def asyncSetUp(self): @@ -637,7 +641,6 @@ def assert_event(): class MapTTLTest(SingleMemberTestCase): - @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id @@ -701,7 +704,6 @@ async def assert_map_not_contains(): class MapMaxIdleTest(SingleMemberTestCase): - @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id @@ -760,7 +762,6 @@ async def test_set(self): compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" ) class MapAggregatorsIntTest(SingleMemberTestCase): - @classmethod def configure_client(cls, config): config["cluster_name"] = cls.cluster.id @@ -866,7 +867,6 @@ async def _fill_with_duplicate_values(self): compare_client_version("4.2.1") < 0, "Tests the features added in 4.2.1 version of the client" ) class MapAggregatorsLongTest(SingleMemberTestCase): - @classmethod def 
configure_client(cls, config): config["cluster_name"] = cls.cluster.id @@ -1071,4 +1071,4 @@ async def test_identity_with_predicate(self): self.assertCountEqual( [HazelcastJsonValue('{"attr1": 4, "attr2": 5, "attr3": 6}')], [attribute.value for attribute in attributes], - ) \ No newline at end of file + ) diff --git a/tests/util.py b/tests/util.py index e6b772eb1f..20dc74ec91 100644 --- a/tests/util.py +++ b/tests/util.py @@ -37,14 +37,17 @@ def fill_map(map, size=10, key_prefix="key", value_prefix="val"): map.put_all(entries) return entries + async def afill_map(map, size=10, key_prefix="key", value_prefix="val"): import asyncio + entries = dict() for i in range(size): entries[key_prefix + str(i)] = value_prefix + str(i) await map.put_all(entries) return entries + def get_ssl_config( cluster_name, enable_ssl=False, From fdda120a3cd9694ff069691693552d030031359e Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 20:12:28 +0300 Subject: [PATCH 05/51] Updates --- hazelcast/internal/asyncio_compact.py | 2 +- hazelcast/internal/asyncio_connection.py | 3 +-- hazelcast/internal/asyncio_future.py | 5 ----- hazelcast/internal/asyncio_reactor.py | 6 +++--- 4 files changed, 5 insertions(+), 11 deletions(-) delete mode 100644 hazelcast/internal/asyncio_future.py diff --git a/hazelcast/internal/asyncio_compact.py b/hazelcast/internal/asyncio_compact.py index 94b22587c4..06f53ab97c 100644 --- a/hazelcast/internal/asyncio_compact.py +++ b/hazelcast/internal/asyncio_compact.py @@ -11,9 +11,9 @@ ) if typing.TYPE_CHECKING: - from hazelcast.cluster import ClusterService from hazelcast.config import Config from hazelcast.protocol.client_message import OutboundMessage + from hazelcast.internal.asyncio_cluster import ClusterService from hazelcast.internal.asyncio_invocation import InvocationService from hazelcast.internal.asyncio_reactor import AsyncioReactor from hazelcast.serialization.compact import ( diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index 4a863f7fe9..b372b5425d 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -3,10 +3,9 @@ import logging import random import struct -import threading import time import uuid -from typing import override, Coroutine +from typing import Coroutine from hazelcast import __version__ from hazelcast.config import ReconnectMode diff --git a/hazelcast/internal/asyncio_future.py b/hazelcast/internal/asyncio_future.py deleted file mode 100644 index b1bc578dd2..0000000000 --- a/hazelcast/internal/asyncio_future.py +++ /dev/null @@ -1,5 +0,0 @@ -import asyncio - - -def future_continue_with(future: asyncio.Future, callback) -> asyncio.Future: - future.add_done_callback(callback) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index d0293cd2f1..16e7764246 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -157,14 +157,14 @@ def __init__( self._update_write_time = update_write_time self._update_sent = update_sent self._update_received = update_received - self._transport = None - self.start_time = None + self._transport: transports.BaseTransport | None = None + self.start_time: float | None = None self._write_buf = io.BytesIO() self._write_buf_size = 0 self._recv_buf = None self._alive = True - def connection_made(self, transport: transports.Transport): + def connection_made(self, transport: transports.BaseTransport): self._transport = transport self.start_time = time.time() 
self.write(self.PROTOCOL_STARTER) From fee5b45cbd2a0e7f629e977dd359a50d28fab794 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 20:13:33 +0300 Subject: [PATCH 06/51] Updates --- hazelcast/internal/asyncio_connection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index b372b5425d..5f0e3c1c72 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -25,7 +25,6 @@ IllegalStateError, ClientOfflineError, ) -from hazelcast.future import ImmediateFuture, ImmediateExceptionFuture from hazelcast.internal.asyncio_invocation import Invocation from hazelcast.lifecycle import LifecycleState from hazelcast.protocol.client_message import ( From fc2c38b6d697afb0fd83aa02855390771bd038a2 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 20:29:23 +0300 Subject: [PATCH 07/51] Updates --- hazelcast/asyncio/client.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index de9d1ef80b..aed8f19add 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -36,8 +36,6 @@ __all__ = ("HazelcastClient",) -from hazelcast.vector import IndexConfig - _logger = logging.getLogger(__name__) @@ -359,11 +357,6 @@ def cp_subsystem(self) -> CPSubsystem: """CP Subsystem offers set of in-memory linearizable data structures.""" return self._cp_subsystem - @property - def sql(self) -> SqlService: - """Returns a service to execute distributed SQL queries.""" - return self._sql_service - def _create_address_provider(self): config = self._config cluster_members = config.cluster_members From 1772031b3631ff5a666fb7e797df539719d675da Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 20:39:58 +0300 Subject: [PATCH 08/51] Removed docs, include HazelcastClient/Map in public API --- hazelcast/asyncio/__init__.py | 2 + hazelcast/asyncio/client.py | 88 ----------------------------------- 2 files changed, 2 insertions(+), 88 deletions(-) diff --git a/hazelcast/asyncio/__init__.py b/hazelcast/asyncio/__init__.py index e69de29bb2..e9064b18c4 100644 --- a/hazelcast/asyncio/__init__.py +++ b/hazelcast/asyncio/__init__.py @@ -0,0 +1,2 @@ +from hazelcast.asyncio.client import HazelcastClient +from hazelcast.internal.asyncio_proxy.map import Map \ No newline at end of file diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index aed8f19add..0d98a14c4d 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -40,9 +40,6 @@ class HazelcastClient: - """Hazelcast client instance to access and manipulate distributed data - structures on the Hazelcast clusters. - """ _CLIENT_ID = AtomicInteger() @@ -53,38 +50,6 @@ async def create_and_start(cls, config: Config = None, **kwargs) -> "HazelcastCl return client def __init__(self, config: Config = None, **kwargs): - """The client can be configured either by: - - - providing a configuration object as the first parameter of the - constructor - - .. code:: python - - from hazelcast import HazelcastClient - from hazelcast.config import Config - - config = Config() - config.cluster_name = "a-cluster" - client = HazelcastClient(config) - - - passing configuration options as keyword arguments - - .. code:: python - - from hazelcast import HazelcastClient - - client = HazelcastClient( - cluster_name="a-cluster", - ) - - - See the :class:`hazelcast.config.Config` documentation for the possible - configuration options. 
- - Args: - config: Optional configuration object. - **kwargs: Optional keyword arguments of the client configuration. - """ if config: if kwargs: raise InvalidConfigurationError( @@ -219,29 +184,11 @@ async def _start(self): _logger.info("Client started") async def get_map(self, name: str) -> Map[KeyType, ValueType]: - """Returns the distributed map instance with the specified name. - - Args: - name: Name of the distributed map. - - Returns: - Distributed map instance with the specified name. - """ return await self._proxy_manager.get_or_create(MAP_SERVICE, name) async def add_distributed_object_listener( self, listener_func: typing.Callable[[DistributedObjectEvent], None] ) -> str: - """Adds a listener which will be notified when a new distributed object - is created or destroyed. - - Args: - listener_func: Function to be called when a distributed object is - created or destroyed. - - Returns: - A registration id which is used as a key to remove the listener. - """ is_smart = self._config.smart_routing codec = client_add_distributed_object_listener_codec request = codec.encode_request(is_smart) @@ -261,28 +208,9 @@ def event_handler(client_message): ) async def remove_distributed_object_listener(self, registration_id: str) -> bool: - """Removes the specified distributed object listener. - - Returns silently if there is no such listener added before. - - Args: - registration_id: The id of registered listener. - - Returns: - ``True`` if registration is removed, ``False`` otherwise. - """ return await self._listener_service.deregister_listener(registration_id) async def get_distributed_objects(self) -> typing.List[Proxy]: - """Returns all distributed objects such as; queue, map, set, list, - topic, lock, multimap. - - Also, as a side effect, it clears the local instances of the destroyed - proxies. - - Returns: - List of instances created by Hazelcast. - """ request = client_get_distributed_objects_codec.encode_request() invocation = Invocation(request, response_handler=lambda m: m) await self._invocation_service.ainvoke(invocation) @@ -313,7 +241,6 @@ async def get_distributed_objects(self) -> typing.List[Proxy]: return self._proxy_manager.get_distributed_objects() async def shutdown(self) -> None: - """Shuts down this HazelcastClient.""" async with self._shutdown_lock: if self._internal_lifecycle_service.running: self._internal_lifecycle_service.fire_lifecycle_event(LifecycleState.SHUTTING_DOWN) @@ -328,33 +255,22 @@ async def shutdown(self) -> None: @property def name(self) -> str: - """Name of the client.""" return self._name @property def lifecycle_service(self) -> LifecycleService: - """Lifecycle service allows you to check if the client is running and - add and remove lifecycle listeners. - """ return self._lifecycle_service @property def partition_service(self) -> PartitionService: - """Partition service allows you to get partition count, introspect - the partition owners, and partition ids of keys. - """ return self._partition_service @property def cluster_service(self) -> ClusterService: - """ClusterService: Cluster service allows you to get the list of - the cluster members and add and remove membership listeners. 
- """ return self._cluster_service @property def cp_subsystem(self) -> CPSubsystem: - """CP Subsystem offers set of in-memory linearizable data structures.""" return self._cp_subsystem def _create_address_provider(self): @@ -399,10 +315,6 @@ def _init_load_balancer(config): class _ClientContext: - """ - Context holding all the required services, managers and the configuration - for a Hazelcast client. - """ def __init__(self): self.client = None From 170cf898aacc94193c399c9d19cfc202bd588978 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 19 Sep 2025 20:48:41 +0300 Subject: [PATCH 09/51] Updates --- tests/integration/asyncio/client_test.py | 2 +- tests/integration/asyncio/proxy/map_test.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/asyncio/client_test.py b/tests/integration/asyncio/client_test.py index 8857bac69b..02ef4d1ea7 100644 --- a/tests/integration/asyncio/client_test.py +++ b/tests/integration/asyncio/client_test.py @@ -84,7 +84,7 @@ async def test_bytes_sent(self): bytes_sent = reactor._bytes_sent self.assertGreater(bytes_sent, 0) m = await self.client.get_map(random_string()) - m.set(random_string(), random_string()) + await m.set(random_string(), random_string()) self.assertGreater(reactor._bytes_sent, bytes_sent) diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py index dc92a3a052..f3bfe626b8 100644 --- a/tests/integration/asyncio/proxy/map_test.py +++ b/tests/integration/asyncio/proxy/map_test.py @@ -598,10 +598,10 @@ async def test_load_all_with_key_set_loads_given_keys(self): self.assertCountEqual(entry_set, {"key0": "val0", "key1": "val1"}) async def test_load_all_overrides_entries_in_memory_by_default(self): - self.map.evict_all() - self.map.put_transient("key0", "new0") - self.map.put_transient("key1", "new1") - self.map.load_all(["key0", "key1"]) + await self.map.evict_all() + await self.map.put_transient("key0", "new0") + await self.map.put_transient("key1", "new1") + await self.map.load_all(["key0", "key1"]) entry_set = await self.map.get_all(["key0", "key1"]) self.assertCountEqual(entry_set, {"key0": "val0", "key1": "val1"}) @@ -773,7 +773,7 @@ async def asyncSetUp(self): self.map = await self.client.get_map(random_string()) await self.map.put_all({"key-%d" % i: i for i in range(50)}) - async def tearDown(self): + async def asyncTearDown(self): await self.map.destroy() await super().asyncTearDown() From 22449a84627a090f4b2b2a37e810ae81f69e7617 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 22 Sep 2025 12:04:55 +0300 Subject: [PATCH 10/51] black --- hazelcast/asyncio/__init__.py | 2 +- hazelcast/asyncio/client.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/hazelcast/asyncio/__init__.py b/hazelcast/asyncio/__init__.py index e9064b18c4..6137aac760 100644 --- a/hazelcast/asyncio/__init__.py +++ b/hazelcast/asyncio/__init__.py @@ -1,2 +1,2 @@ from hazelcast.asyncio.client import HazelcastClient -from hazelcast.internal.asyncio_proxy.map import Map \ No newline at end of file +from hazelcast.internal.asyncio_proxy.map import Map diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 0d98a14c4d..290af32c6b 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -315,7 +315,6 @@ def _init_load_balancer(config): class _ClientContext: - def __init__(self): self.client = None self.config = None From 5406bc65c55c26dfea2c7a3bdd9771bf031789c4 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 22 Sep 
2025 14:46:45 +0300 Subject: [PATCH 11/51] Ignore incorrect mypy errors --- hazelcast/asyncio/client.py | 4 ++-- hazelcast/internal/asyncio_proxy/map.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 290af32c6b..508efa34c9 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -221,7 +221,7 @@ async def get_distributed_objects(self) -> typing.List[Proxy]: } response = client_get_distributed_objects_codec.decode_response(invocation.future.result()) - async with asyncio.TaskGroup() as tg: + async with asyncio.TaskGroup() as tg: # type: ignore[attr-defined] for dist_obj_info in response: local_distributed_object_infos.discard(dist_obj_info) tg.create_task( @@ -230,7 +230,7 @@ async def get_distributed_objects(self) -> typing.List[Proxy]: ) ) - async with asyncio.TaskGroup() as tg: + async with asyncio.TaskGroup() as tg: # type: ignore[attr-defined] for dist_obj_info in local_distributed_object_infos: tg.create_task( self._proxy_manager.destroy_proxy( diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index ea0765fc86..ab34407ddb 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -640,7 +640,7 @@ async def put_all(self, map: typing.Dict[KeyType, ValueType]) -> None: except KeyError: partition_map[partition_id] = [entry] - async with asyncio.TaskGroup() as tg: + async with asyncio.TaskGroup() as tg: # type: ignore[attr-defined] for partition_id, entry_list in partition_map.items(): request = map_put_all_codec.encode_request( self.name, entry_list, False From a417a4a601e41828cda6b8ef20d529c97358c70f Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 24 Sep 2025 08:56:31 +0300 Subject: [PATCH 12/51] Updates --- hazelcast/internal/asyncio_connection.py | 33 ++++++++++-------------- hazelcast/internal/asyncio_reactor.py | 7 ----- 2 files changed, 14 insertions(+), 26 deletions(-) diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index 5f0e3c1c72..c78736cb2a 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -173,7 +173,7 @@ def __init__( self, self._client, config, reactor, invocation_service ) self._connection_listeners = [] - self._connect_all_members_timer = None + self._connect_all_members_task: asyncio.Task | None = None self._async_start = config.async_start self._connect_to_cluster_thread_running = False self._shuffle_member_list = config.shuffle_member_list @@ -274,8 +274,8 @@ async def shutdown(self): return self.live = False - if self._connect_all_members_timer: - self._connect_all_members_timer.cancel() + if self._connect_all_members_task: + self._connect_all_members_task.cancel() self._heartbeat_manager.shutdown() @@ -458,6 +458,7 @@ def _start_connect_all_members_timer(self): connecting_uuids = set() async def run(): + await asyncio.sleep(1) if not self._lifecycle_service.running: return @@ -479,13 +480,9 @@ async def run(): for item in member_uuids: connecting_uuids.discard(item) - self._connect_all_members_timer = self._reactor.add_timer( - 1, lambda: asyncio.create_task(run()) - ) + self._connect_all_members_task = asyncio.create_task(run()) - self._connect_all_members_timer = self._reactor.add_timer( - 1, lambda: asyncio.create_task(run()) - ) + self._connect_all_members_task = asyncio.create_task(run()) async def _connect_to_cluster(self): await self._sync_connect_to_cluster() @@ 
-816,12 +813,14 @@ def __init__(self, connection_manager, client, config, reactor, invocation_servi self._invocation_service = invocation_service self._heartbeat_timeout = config.heartbeat_timeout self._heartbeat_interval = config.heartbeat_interval - self._heartbeat_timer = None + self._heartbeat_task: asyncio.Task | None = None def start(self): """Starts sending periodic HeartBeat operations.""" async def _heartbeat(): + await asyncio.sleep(self._heartbeat_interval) + _logger.debug("heartbeat") conn_manager = self._connection_manager if not conn_manager.live: return @@ -830,18 +829,14 @@ async def _heartbeat(): async with asyncio.TaskGroup() as tg: for connection in list(conn_manager.active_connections.values()): tg.create_task(self._check_connection(now, connection)) - self._heartbeat_timer = self._reactor.add_timer( - self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat()) - ) + self._heartbeat_task = asyncio.create_task(_heartbeat()) - self._heartbeat_timer = self._reactor.add_timer( - self._heartbeat_interval, lambda: asyncio.create_task(_heartbeat()) - ) + self._heartbeat_task = asyncio.create_task(_heartbeat()) def shutdown(self): """Stops HeartBeat operations.""" - if self._heartbeat_timer: - self._heartbeat_timer.cancel() + if self._heartbeat_task: + self._heartbeat_task.cancel() async def _check_connection(self, now, connection): if not connection.live: @@ -858,7 +853,7 @@ async def _check_connection(self, now, connection): if (now - connection.last_write_time) > self._heartbeat_interval: request = client_ping_codec.encode_request() invocation = Invocation(request, connection=connection, urgent=True) - self._invocation_service.invoke(invocation) + asyncio.create_task(self._invocation_service.ainvoke(invocation)) _frame_header = struct.Struct(" Date: Thu, 25 Sep 2025 14:40:51 +0300 Subject: [PATCH 13/51] Updates --- hazelcast/internal/asyncio_reactor.py | 50 ++++++++------------------- 1 file changed, 14 insertions(+), 36 deletions(-) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index da97f43b4b..92888c103e 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -90,15 +90,7 @@ async def create_and_connect( return this def _create_protocol(self): - return HazelcastProtocol( - self._loop, - self._reader, - self._address, - self._update_read_time, - self._update_write_time, - self._update_sent, - self._update_received, - ) + return HazelcastProtocol(self) async def _create_connection(self): loop = self._loop @@ -133,23 +125,8 @@ class HazelcastProtocol(asyncio.BufferedProtocol): PROTOCOL_STARTER = b"CP2" - def __init__( - self, - loop: AbstractEventLoop, - reader, - address, - update_read_time, - update_write_time, - update_sent, - update_received, - ): - self._loop = loop - self._reader = reader - self._address = address - self._update_read_time = update_read_time - self._update_write_time = update_write_time - self._update_sent = update_sent - self._update_received = update_received + def __init__(self, conn: AsyncioConnection): + self._conn = conn self._transport: transports.BaseTransport | None = None self.start_time: float | None = None self._write_buf = io.BytesIO() @@ -161,11 +138,12 @@ def connection_made(self, transport: transports.BaseTransport): self._transport = transport self.start_time = time.time() self.write(self.PROTOCOL_STARTER) - _logger.debug("Connected to %s", self._address) - self._loop.call_soon(self._write_loop) + _logger.debug("Connected to %s", 
self._conn._address) + self._conn._loop.call_soon(self._write_loop) def connection_lost(self, exc): self._alive = False + self._conn._loop.create_task(self._conn.close_connection(str(exc), None)) return False def close(self): @@ -183,11 +161,11 @@ def get_buffer(self, sizehint): def buffer_updated(self, nbytes): recv_bytes = self._recv_buf[:nbytes] - self._update_read_time(time.time()) - self._update_received(nbytes) - self._reader.read(recv_bytes) - if self._reader.length: - self._reader.process() + self._conn._update_read_time(time.time()) + self._conn._update_received(nbytes) + self._conn._reader.read(recv_bytes) + if self._conn._reader.length: + self._conn._reader.process() def eof_received(self): self._alive = False @@ -197,11 +175,11 @@ def _do_write(self): return buf_bytes = self._write_buf.getvalue() self._transport.write(buf_bytes[: self._write_buf_size]) - self._update_write_time(time.time()) - self._update_sent(self._write_buf_size) + self._conn._update_write_time(time.time()) + self._conn._update_sent(self._write_buf_size) self._write_buf.seek(0) self._write_buf_size = 0 def _write_loop(self): self._do_write() - return self._loop.call_later(0.01, self._write_loop) + return self._conn._loop.call_later(0.01, self._write_loop) From baa3bc1175bd9cd920d3170c443286e4f9a0e485 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 29 Sep 2025 14:42:25 +0300 Subject: [PATCH 14/51] Annotate optional params --- hazelcast/asyncio/client.py | 4 ++-- hazelcast/internal/asyncio_proxy/map.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 508efa34c9..0848445cfb 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -44,12 +44,12 @@ class HazelcastClient: _CLIENT_ID = AtomicInteger() @classmethod - async def create_and_start(cls, config: Config = None, **kwargs) -> "HazelcastClient": + async def create_and_start(cls, config: Config|None = None, **kwargs) -> "HazelcastClient": client = HazelcastClient(config, **kwargs) await client._start() return client - def __init__(self, config: Config = None, **kwargs): + def __init__(self, config: Config|None = None, **kwargs): if config: if kwargs: raise InvalidConfigurationError( diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index ab34407ddb..fbbc76c31d 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -395,7 +395,7 @@ async def evict_all(self) -> None: return await self._invoke(request) async def execute_on_entries( - self, entry_processor: typing.Any, predicate: Predicate = None + self, entry_processor: typing.Any, predicate: Predicate|None = None ) -> typing.List[typing.Any]: if predicate: try: @@ -518,7 +518,7 @@ async def is_empty(self) -> bool: request = map_is_empty_codec.encode_request(self.name) return await self._invoke(request, map_is_empty_codec.decode_response) - async def key_set(self, predicate: Predicate = None) -> typing.List[ValueType]: + async def key_set(self, predicate: Predicate|None = None) -> typing.List[ValueType]: if predicate: if isinstance(predicate, _PagingPredicate): predicate.iteration_type = IterationType.KEY From ebfc9e22e4cc4a8d49ea8c9b1b535253966be9a1 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 29 Sep 2025 14:46:47 +0300 Subject: [PATCH 15/51] black --- hazelcast/asyncio/client.py | 4 ++-- hazelcast/internal/asyncio_proxy/map.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 0848445cfb..0f6db252fe 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -44,12 +44,12 @@ class HazelcastClient: _CLIENT_ID = AtomicInteger() @classmethod - async def create_and_start(cls, config: Config|None = None, **kwargs) -> "HazelcastClient": + async def create_and_start(cls, config: Config | None = None, **kwargs) -> "HazelcastClient": client = HazelcastClient(config, **kwargs) await client._start() return client - def __init__(self, config: Config|None = None, **kwargs): + def __init__(self, config: Config | None = None, **kwargs): if config: if kwargs: raise InvalidConfigurationError( diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index fbbc76c31d..9f2f765ec1 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -395,7 +395,7 @@ async def evict_all(self) -> None: return await self._invoke(request) async def execute_on_entries( - self, entry_processor: typing.Any, predicate: Predicate|None = None + self, entry_processor: typing.Any, predicate: Predicate | None = None ) -> typing.List[typing.Any]: if predicate: try: @@ -518,7 +518,7 @@ async def is_empty(self) -> bool: request = map_is_empty_codec.encode_request(self.name) return await self._invoke(request, map_is_empty_codec.decode_response) - async def key_set(self, predicate: Predicate|None = None) -> typing.List[ValueType]: + async def key_set(self, predicate: Predicate | None = None) -> typing.List[ValueType]: if predicate: if isinstance(predicate, _PagingPredicate): predicate.iteration_type = IterationType.KEY From 6928837683400000db616d8272b975b952006eaf Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 11:31:04 +0300 Subject: [PATCH 16/51] Remove update to test util --- tests/integration/asyncio/proxy/map_test.py | 6 ++++-- tests/integration/asyncio/util.py | 9 +++++++++ tests/util.py | 10 ---------- 3 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 tests/integration/asyncio/util.py diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py index f3bfe626b8..f389fe5c77 100644 --- a/tests/integration/asyncio/proxy/map_test.py +++ b/tests/integration/asyncio/proxy/map_test.py @@ -56,9 +56,11 @@ compare_server_version, skip_if_client_version_older_than, random_string, - afill_map, ) +from tests.integration.asyncio.util import fill_map + + class EntryProcessor(IdentifiedDataSerializable): FACTORY_ID = 66 @@ -579,7 +581,7 @@ def configure_cluster(cls): async def asyncSetUp(self): await super().asyncSetUp() self.map = await self.client.get_map("mapstore-test") - self.entries = await afill_map(self.map, size=10, key_prefix="key", value_prefix="val") + self.entries = await fill_map(self.map, size=10, key_prefix="key", value_prefix="val") async def asyncTearDown(self): await self.map.destroy() diff --git a/tests/integration/asyncio/util.py b/tests/integration/asyncio/util.py new file mode 100644 index 0000000000..fedd590f8a --- /dev/null +++ b/tests/integration/asyncio/util.py @@ -0,0 +1,9 @@ + +async def fill_map(map, size=10, key_prefix="key", value_prefix="val"): + entries = dict() + for i in range(size): + entries[key_prefix + str(i)] = value_prefix + str(i) + await map.put_all(entries) + return entries + + diff --git a/tests/util.py b/tests/util.py index 20dc74ec91..6492f2cd7f 100644 --- a/tests/util.py +++ b/tests/util.py @@ -38,16 +38,6 @@ def fill_map(map, 
size=10, key_prefix="key", value_prefix="val"): return entries -async def afill_map(map, size=10, key_prefix="key", value_prefix="val"): - import asyncio - - entries = dict() - for i in range(size): - entries[key_prefix + str(i)] = value_prefix + str(i) - await map.put_all(entries) - return entries - - def get_ssl_config( cluster_name, enable_ssl=False, From 3e03cbf078c7abdbe98eeac58e1b4f7ff3baf439 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 11:31:53 +0300 Subject: [PATCH 17/51] black --- tests/integration/asyncio/proxy/map_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py index f389fe5c77..b63ae9e0fe 100644 --- a/tests/integration/asyncio/proxy/map_test.py +++ b/tests/integration/asyncio/proxy/map_test.py @@ -61,7 +61,6 @@ from tests.integration.asyncio.util import fill_map - class EntryProcessor(IdentifiedDataSerializable): FACTORY_ID = 66 CLASS_ID = 1 From 51ced7a50aa477ddb2fd1bcf524c5c717ec1d63d Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 11:32:10 +0300 Subject: [PATCH 18/51] black --- tests/integration/asyncio/ssl_test.py | 134 ++++++++++++++++++++++++++ tests/integration/asyncio/util.py | 3 - 2 files changed, 134 insertions(+), 3 deletions(-) create mode 100644 tests/integration/asyncio/ssl_test.py diff --git a/tests/integration/asyncio/ssl_test.py b/tests/integration/asyncio/ssl_test.py new file mode 100644 index 0000000000..1f1f1cbdfb --- /dev/null +++ b/tests/integration/asyncio/ssl_test.py @@ -0,0 +1,134 @@ +import os + +import pytest + +from tests.base import HazelcastTestCase +from hazelcast.asyncio.client import HazelcastClient +from hazelcast.errors import HazelcastError +from hazelcast.config import SSLProtocol +from tests.util import get_ssl_config, fill_map, get_abs_path + + +@pytest.mark.enterprise +class SSLTest(HazelcastTestCase): + current_directory = os.path.dirname(__file__) + rc = None + hazelcast_ssl_xml = get_abs_path( + current_directory, "../../integration/backward_compatible/hazelcast-ssl.xml" + ) + default_ca_xml = get_abs_path( + current_directory, "../../integration/backward_compatible/hazelcast-default-ca.xml" + ) + + def setUp(self): + self.rc = self.create_rc() + + def tearDown(self): + self.rc.exit() + + def test_ssl_disabled(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + with self.assertRaises(HazelcastError): + HazelcastClient(**get_ssl_config(cluster.id, False)) + + def test_ssl_enabled_is_client_live(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + client = HazelcastClient( + **get_ssl_config( + cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + client.shutdown() + + def test_ssl_enabled_trust_default_certificates(self): + cluster = self.create_cluster(self.rc, self.read_default_ca_config()) + cluster.start_member() + + client = HazelcastClient(**get_ssl_config(cluster.id, True)) + self.assertTrue(client.lifecycle_service.is_running()) + client.shutdown() + + def test_ssl_enabled_dont_trust_self_signed_certificates(self): + # Member started with self-signed certificate + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + with self.assertRaises(HazelcastError): + HazelcastClient(**get_ssl_config(cluster.id, True)) + + def test_ssl_enabled_map_size(self): + cluster = 
self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + client = HazelcastClient( + **get_ssl_config( + cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") + ) + ) + test_map = client.get_map("test_map").blocking() + fill_map(test_map, 10) + self.assertEqual(test_map.size(), 10) + client.shutdown() + + def test_ssl_enabled_with_custom_ciphers(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + client = HazelcastClient( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + ciphers="ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384", + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + client.shutdown() + + def test_ssl_enabled_with_invalid_ciphers(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + with self.assertRaises(HazelcastError): + HazelcastClient( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + ciphers="INVALID-CIPHER1:INVALID_CIPHER2", + ) + ) + + def test_ssl_enabled_with_protocol_mismatch(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + # Member configured with TLSv1 + with self.assertRaises(HazelcastError): + HazelcastClient( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + protocol=SSLProtocol.SSLv3, + ) + ) + + def read_default_ca_config(self): + with open(self.default_ca_xml, "r") as f: + xml_config = f.read() + + keystore_path = get_abs_path(self.current_directory, "keystore.jks") + return xml_config % (keystore_path, keystore_path) + + def read_ssl_config(self): + with open(self.hazelcast_ssl_xml, "r") as f: + xml_config = f.read() + + keystore_path = get_abs_path(self.current_directory, "server1.keystore") + return xml_config % keystore_path diff --git a/tests/integration/asyncio/util.py b/tests/integration/asyncio/util.py index fedd590f8a..e101a58103 100644 --- a/tests/integration/asyncio/util.py +++ b/tests/integration/asyncio/util.py @@ -1,9 +1,6 @@ - async def fill_map(map, size=10, key_prefix="key", value_prefix="val"): entries = dict() for i in range(size): entries[key_prefix + str(i)] = value_prefix + str(i) await map.put_all(entries) return entries - - From e635b94efdda2aeb67c5e86a0286f3a3f690b02e Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 11:33:51 +0300 Subject: [PATCH 19/51] update --- tests/integration/asyncio/ssl_test.py | 134 -------------------------- 1 file changed, 134 deletions(-) delete mode 100644 tests/integration/asyncio/ssl_test.py diff --git a/tests/integration/asyncio/ssl_test.py b/tests/integration/asyncio/ssl_test.py deleted file mode 100644 index 1f1f1cbdfb..0000000000 --- a/tests/integration/asyncio/ssl_test.py +++ /dev/null @@ -1,134 +0,0 @@ -import os - -import pytest - -from tests.base import HazelcastTestCase -from hazelcast.asyncio.client import HazelcastClient -from hazelcast.errors import HazelcastError -from hazelcast.config import SSLProtocol -from tests.util import get_ssl_config, fill_map, get_abs_path - - -@pytest.mark.enterprise -class SSLTest(HazelcastTestCase): - current_directory = os.path.dirname(__file__) - rc = None - hazelcast_ssl_xml = get_abs_path( - current_directory, "../../integration/backward_compatible/hazelcast-ssl.xml" - ) - default_ca_xml = get_abs_path( - current_directory, 
"../../integration/backward_compatible/hazelcast-default-ca.xml" - ) - - def setUp(self): - self.rc = self.create_rc() - - def tearDown(self): - self.rc.exit() - - def test_ssl_disabled(self): - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - with self.assertRaises(HazelcastError): - HazelcastClient(**get_ssl_config(cluster.id, False)) - - def test_ssl_enabled_is_client_live(self): - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - client = HazelcastClient( - **get_ssl_config( - cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") - ) - ) - self.assertTrue(client.lifecycle_service.is_running()) - client.shutdown() - - def test_ssl_enabled_trust_default_certificates(self): - cluster = self.create_cluster(self.rc, self.read_default_ca_config()) - cluster.start_member() - - client = HazelcastClient(**get_ssl_config(cluster.id, True)) - self.assertTrue(client.lifecycle_service.is_running()) - client.shutdown() - - def test_ssl_enabled_dont_trust_self_signed_certificates(self): - # Member started with self-signed certificate - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - with self.assertRaises(HazelcastError): - HazelcastClient(**get_ssl_config(cluster.id, True)) - - def test_ssl_enabled_map_size(self): - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - client = HazelcastClient( - **get_ssl_config( - cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") - ) - ) - test_map = client.get_map("test_map").blocking() - fill_map(test_map, 10) - self.assertEqual(test_map.size(), 10) - client.shutdown() - - def test_ssl_enabled_with_custom_ciphers(self): - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - client = HazelcastClient( - **get_ssl_config( - cluster.id, - True, - get_abs_path(self.current_directory, "server1-cert.pem"), - ciphers="ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384", - ) - ) - self.assertTrue(client.lifecycle_service.is_running()) - client.shutdown() - - def test_ssl_enabled_with_invalid_ciphers(self): - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - with self.assertRaises(HazelcastError): - HazelcastClient( - **get_ssl_config( - cluster.id, - True, - get_abs_path(self.current_directory, "server1-cert.pem"), - ciphers="INVALID-CIPHER1:INVALID_CIPHER2", - ) - ) - - def test_ssl_enabled_with_protocol_mismatch(self): - cluster = self.create_cluster(self.rc, self.read_ssl_config()) - cluster.start_member() - - # Member configured with TLSv1 - with self.assertRaises(HazelcastError): - HazelcastClient( - **get_ssl_config( - cluster.id, - True, - get_abs_path(self.current_directory, "server1-cert.pem"), - protocol=SSLProtocol.SSLv3, - ) - ) - - def read_default_ca_config(self): - with open(self.default_ca_xml, "r") as f: - xml_config = f.read() - - keystore_path = get_abs_path(self.current_directory, "keystore.jks") - return xml_config % (keystore_path, keystore_path) - - def read_ssl_config(self): - with open(self.hazelcast_ssl_xml, "r") as f: - xml_config = f.read() - - keystore_path = get_abs_path(self.current_directory, "server1.keystore") - return xml_config % keystore_path From 4f103f69034380743e81728af9f5f7700f508152 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 15:21:32 +0300 Subject: [PATCH 20/51] Added support for SSL --- 
hazelcast/internal/asyncio_connection.py | 5 +- hazelcast/internal/asyncio_reactor.py | 59 +++++++++++++++++++----- 2 files changed, 50 insertions(+), 14 deletions(-) diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index c78736cb2a..8db67caf81 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -395,13 +395,12 @@ async def _get_or_connect_to_member(self, member): translated = self._translate_member_address(member) connection = await self._create_connection(translated) - response = await self._authenticate(connection) # .continue_with(self._on_auth, connection) + response = await self._authenticate(connection) await self._on_auth(response, connection) return connection async def _create_connection(self, address): - factory = self._reactor.connection_factory - return await factory( + return await self._reactor.connection_factory( self, self._connection_id_generator.get_and_increment(), address, diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index 92888c103e..988b21c486 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -1,9 +1,11 @@ import asyncio import io import logging +import ssl import time from asyncio import AbstractEventLoop, transports +from hazelcast.config import Config, SSLProtocol from hazelcast.internal.asyncio_connection import Connection from hazelcast.core import Address @@ -83,25 +85,24 @@ async def create_and_connect( this = cls( loop, reactor, connection_manager, connection_id, address, config, message_callback ) - if this._config.ssl_enabled: - await this._create_ssl_connection() - else: - await this._create_connection() + await this._create_connection(config) return this def _create_protocol(self): return HazelcastProtocol(self) - async def _create_connection(self): - loop = self._loop - res = await loop.create_connection( - self._create_protocol, host=self._address.host, port=self._address.port + async def _create_connection(self, config): + ssl_context = None + if config.ssl_enabled: + ssl_context = self._create_ssl_context(config) + res = await self._loop.create_connection( + self._create_protocol, + host=self._address.host, + port=self._address.port, + ssl=ssl_context, ) _sock, self._proto = res - async def _create_ssl_connection(self): - raise NotImplementedError - def _write(self, buf): self._proto.write(buf) @@ -120,6 +121,42 @@ def _update_sent(self, sent): def _update_received(self, received): self._reactor.update_bytes_received(received) + def _create_ssl_context(self, config: Config): + ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + protocol = config.ssl_protocol + # Use only the configured protocol + try: + if protocol != SSLProtocol.SSLv2: + ssl_context.options |= ssl.OP_NO_SSLv2 + if protocol != SSLProtocol.SSLv3: + ssl_context.options |= ssl.OP_NO_SSLv3 + if protocol != SSLProtocol.TLSv1: + ssl_context.options |= ssl.OP_NO_TLSv1 + if protocol != SSLProtocol.TLSv1_1: + ssl_context.options |= ssl.OP_NO_TLSv1_1 + if protocol != SSLProtocol.TLSv1_2: + ssl_context.options |= ssl.OP_NO_TLSv1_2 + if protocol != SSLProtocol.TLSv1_3: + ssl_context.options |= ssl.OP_NO_TLSv1_3 + except AttributeError: + pass + + ssl_context.verify_mode = ssl.CERT_REQUIRED + if config.ssl_cafile: + ssl_context.load_verify_locations(config.ssl_cafile) + else: + ssl_context.load_default_certs() + if config.ssl_certfile: + ssl_context.load_cert_chain( + config.ssl_certfile, 
config.ssl_keyfile, config.ssl_password + ) + if config.ssl_ciphers: + ssl_context.set_ciphers(config.ssl_ciphers) + if config.ssl_check_hostname: + ssl_context.check_hostname = True + + return ssl_context + class HazelcastProtocol(asyncio.BufferedProtocol): From 042cc58902139739bbad9f8ea659df76b63fad79 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 15:22:39 +0300 Subject: [PATCH 21/51] Added SSL tests --- .../integration/asyncio/ssl_tests/__init__.py | 0 .../integration/asyncio/ssl_tests/ssl_test.py | 134 ++++++++++++++++++ 2 files changed, 134 insertions(+) create mode 100644 tests/integration/asyncio/ssl_tests/__init__.py create mode 100644 tests/integration/asyncio/ssl_tests/ssl_test.py diff --git a/tests/integration/asyncio/ssl_tests/__init__.py b/tests/integration/asyncio/ssl_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/asyncio/ssl_tests/ssl_test.py b/tests/integration/asyncio/ssl_tests/ssl_test.py new file mode 100644 index 0000000000..6190c7571a --- /dev/null +++ b/tests/integration/asyncio/ssl_tests/ssl_test.py @@ -0,0 +1,134 @@ +import os +import unittest + +import pytest + +from tests.integration.asyncio.base import HazelcastTestCase +from hazelcast.asyncio.client import HazelcastClient +from hazelcast.errors import HazelcastError +from hazelcast.config import SSLProtocol +from tests.util import get_ssl_config, get_abs_path +from tests.integration.asyncio.util import fill_map + + +@pytest.mark.enterprise +class SSLTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + current_directory = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../backward_compatible/ssl_tests") + ) + rc = None + hazelcast_ssl_xml = get_abs_path(current_directory, "hazelcast-ssl.xml") + default_ca_xml = get_abs_path(current_directory, "hazelcast-default-ca.xml") + + def setUp(self): + self.rc = self.create_rc() + + def tearDown(self): + self.rc.exit() + + async def test_ssl_disabled(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start(**get_ssl_config(cluster.id, False)) + + async def test_ssl_enabled_is_client_live(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + client = await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + async def test_ssl_enabled_trust_default_certificates(self): + cluster = self.create_cluster(self.rc, self.read_default_ca_config()) + cluster.start_member() + + client = await HazelcastClient.create_and_start(**get_ssl_config(cluster.id, True)) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + async def test_ssl_enabled_dont_trust_self_signed_certificates(self): + # Member started with self-signed certificate + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start(**get_ssl_config(cluster.id, True)) + + async def test_ssl_enabled_map_size(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + client = await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") + ) + 
) + test_map = await client.get_map("test_map") + await fill_map(test_map, 10) + self.assertEqual(await test_map.size(), 10) + await client.shutdown() + + async def test_ssl_enabled_with_custom_ciphers(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + client = await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + ciphers="ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384", + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + async def test_ssl_enabled_with_invalid_ciphers(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + ciphers="INVALID-CIPHER1:INVALID_CIPHER2", + ) + ) + + async def test_ssl_enabled_with_protocol_mismatch(self): + cluster = self.create_cluster(self.rc, self.read_ssl_config()) + cluster.start_member() + + # Member configured with TLSv1 + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + protocol=SSLProtocol.SSLv3, + ) + ) + + def read_default_ca_config(self): + with open(self.default_ca_xml, "r") as f: + xml_config = f.read() + + keystore_path = get_abs_path(self.current_directory, "keystore.jks") + return xml_config % (keystore_path, keystore_path) + + def read_ssl_config(self): + with open(self.hazelcast_ssl_xml, "r") as f: + xml_config = f.read() + + keystore_path = get_abs_path(self.current_directory, "server1.keystore") + return xml_config % keystore_path From 265a2b4a3672aa7f81152637d75918854a793052 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 30 Sep 2025 15:33:13 +0300 Subject: [PATCH 22/51] Added mutual authentication test --- .../ssl_tests/mutual_authentication_test.py | 169 ++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 tests/integration/asyncio/ssl_tests/mutual_authentication_test.py diff --git a/tests/integration/asyncio/ssl_tests/mutual_authentication_test.py b/tests/integration/asyncio/ssl_tests/mutual_authentication_test.py new file mode 100644 index 0000000000..2d392278d2 --- /dev/null +++ b/tests/integration/asyncio/ssl_tests/mutual_authentication_test.py @@ -0,0 +1,169 @@ +import os +import unittest + +import pytest + +from tests.integration.asyncio.base import HazelcastTestCase +from hazelcast.asyncio.client import HazelcastClient +from hazelcast.errors import HazelcastError +from tests.util import get_ssl_config, get_abs_path + + +@pytest.mark.enterprise +class MutualAuthenticationTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + current_directory = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../backward_compatible/ssl_tests") + ) + rc = None + mutual_auth = True + ma_req_xml = get_abs_path(current_directory, "hazelcast-ma-required.xml") + ma_opt_xml = get_abs_path(current_directory, "hazelcast-ma-optional.xml") + + def setUp(self): + self.rc = self.create_rc() + + def tearDown(self): + self.rc.exit() + + async def test_ma_required_client_and_server_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(True)) + cluster.start_member() + client = await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + 
True, + get_abs_path(self.current_directory, "server1-cert.pem"), + get_abs_path(self.current_directory, "client1-cert.pem"), + get_abs_path(self.current_directory, "client1-key.pem"), + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + async def test_ma_required_server_not_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(True)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server2-cert.pem"), + get_abs_path(self.current_directory, "client1-cert.pem"), + get_abs_path(self.current_directory, "client1-key.pem"), + ) + ) + + async def test_ma_required_client_not_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(True)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + get_abs_path(self.current_directory, "client2-cert.pem"), + get_abs_path(self.current_directory, "client2-key.pem"), + ) + ) + + async def test_ma_required_client_and_server_not_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(True)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server2-cert.pem"), + get_abs_path(self.current_directory, "client2-cert.pem"), + get_abs_path(self.current_directory, "client2-key.pem"), + ) + ) + + async def test_ma_optional_client_and_server_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(False)) + cluster.start_member() + client = await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + get_abs_path(self.current_directory, "client1-cert.pem"), + get_abs_path(self.current_directory, "client1-key.pem"), + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + async def test_ma_optional_server_not_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(False)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server2-cert.pem"), + get_abs_path(self.current_directory, "client1-cert.pem"), + get_abs_path(self.current_directory, "client1-key.pem"), + ) + ) + + async def test_ma_optional_client_not_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(False)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server1-cert.pem"), + get_abs_path(self.current_directory, "client2-cert.pem"), + get_abs_path(self.current_directory, "client2-key.pem"), + ) + ) + + async def test_ma_optional_client_and_server_not_authenticated(self): + cluster = self.create_cluster(self.rc, self.read_config(False)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, + True, + get_abs_path(self.current_directory, "server2-cert.pem"), + get_abs_path(self.current_directory, "client2-cert.pem"), 
+ get_abs_path(self.current_directory, "client2-key.pem"), + ) + ) + + async def test_ma_required_with_no_cert_file(self): + cluster = self.create_cluster(self.rc, self.read_config(True)) + cluster.start_member() + with self.assertRaises(HazelcastError): + await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") + ) + ) + + async def test_ma_optional_with_no_cert_file(self): + cluster = self.create_cluster(self.rc, self.read_config(False)) + cluster.start_member() + client = await HazelcastClient.create_and_start( + **get_ssl_config( + cluster.id, True, get_abs_path(self.current_directory, "server1-cert.pem") + ) + ) + self.assertTrue(client.lifecycle_service.is_running()) + await client.shutdown() + + def read_config(self, is_ma_required): + file_path = self.ma_req_xml if is_ma_required else self.ma_opt_xml + with open(file_path, "r") as f: + xml_config = f.read() + keystore_path = get_abs_path(self.current_directory, "server1.keystore") + truststore_path = get_abs_path(self.current_directory, "server1.truststore") + return xml_config % (keystore_path, truststore_path) From 293975d14b59595db9bd8789cb29b797a9a6b2ac Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 1 Oct 2025 09:31:55 +0300 Subject: [PATCH 23/51] Added hostname verification tests --- hazelcast/internal/asyncio_reactor.py | 8 +- .../hostname_verification/__init__.py | 0 .../ssl_hostname_verification_test.py | 133 ++++++++++++++++++ 3 files changed, 139 insertions(+), 2 deletions(-) create mode 100644 tests/integration/asyncio/ssl_tests/hostname_verification/__init__.py create mode 100644 tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index 988b21c486..a44d656449 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -85,21 +85,25 @@ async def create_and_connect( this = cls( loop, reactor, connection_manager, connection_id, address, config, message_callback ) - await this._create_connection(config) + await this._create_connection(config, address) return this def _create_protocol(self): return HazelcastProtocol(self) - async def _create_connection(self, config): + async def _create_connection(self, config, address): ssl_context = None if config.ssl_enabled: ssl_context = self._create_ssl_context(config) + server_hostname = None + if config.ssl_check_hostname: + server_hostname = address.host res = await self._loop.create_connection( self._create_protocol, host=self._address.host, port=self._address.port, ssl=ssl_context, + server_hostname=server_hostname, ) _sock, self._proto = res diff --git a/tests/integration/asyncio/ssl_tests/hostname_verification/__init__.py b/tests/integration/asyncio/ssl_tests/hostname_verification/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py b/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py new file mode 100644 index 0000000000..c47f3647d4 --- /dev/null +++ b/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py @@ -0,0 +1,133 @@ +import os +import sys +import unittest + +import pytest + +from hazelcast.asyncio.client import HazelcastClient +from hazelcast.config import SSLProtocol +from hazelcast.errors import IllegalStateError +from 
tests.integration.asyncio.base import HazelcastTestCase +from tests.util import compare_client_version, get_abs_path + +current_directory = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../../backward_compatible/ssl_tests/hostname_verification") +) + +MEMBER_CONFIG = """ + + + + + com.hazelcast.nio.ssl.BasicSSLContextFactory + + + %s + 123456 + PKCS12 + TLSv1.2 + + + + +""" + + +@unittest.skipIf( + sys.version_info < (3, 7), + "Hostname verification feature requires Python 3.7+", +) +@unittest.skipIf( + compare_client_version("5.1") < 0, + "Tests the features added in 5.1 version of the client", +) +@pytest.mark.enterprise +class SslHostnameVerificationTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + def setUp(self): + self.rc = self.create_rc() + self.cluster = None + + async def asyncTearDown(self): + await self.shutdown_all_clients() + self.rc.terminateCluster(self.cluster.id) + self.rc.exit() + + async def test_hostname_verification_with_loopback_san(self): + # SAN entry is present with different possible values + file_name = "tls-host-loopback-san" + self.start_member_with(f"{file_name}.p12") + await self.start_client_with(f"{file_name}.pem", "127.0.0.1:5701") + await self.start_client_with(f"{file_name}.pem", "localhost:5701") + + async def test_hostname_verification_with_loopback_dns_san(self): + # SAN entry is present, but only with `dns:localhost` + file_name = "tls-host-loopback-san-dns" + self.start_member_with(f"{file_name}.p12") + await self.start_client_with(f"{file_name}.pem", "localhost:5701") + with self.assertRaisesRegex(IllegalStateError, "Unable to connect to any cluster"): + await self.start_client_with(f"{file_name}.pem", "127.0.0.1:5701") + + async def test_hostname_verification_with_different_san(self): + # There is a valid entry, but it does not match with the address of the member. + file_name = "tls-host-not-our-san" + self.start_member_with(f"{file_name}.p12") + with self.assertRaisesRegex(IllegalStateError, "Unable to connect to any cluster"): + await self.start_client_with(f"{file_name}.pem", "localhost:5701") + with self.assertRaisesRegex(IllegalStateError, "Unable to connect to any cluster"): + await self.start_client_with(f"{file_name}.pem", "127.0.0.1:5701") + + async def test_hostname_verification_with_loopback_cn(self): + # No entry in SAN but an entry in CN which checked as a fallback + # when no entry in SAN is present. + file_name = "tls-host-loopback-cn" + self.start_member_with(f"{file_name}.p12") + await self.start_client_with(f"{file_name}.pem", "localhost:5701") + # See https://stackoverflow.com/a/8444863/12394291. IP addresses in CN + # are not supported. So, we don't have a test for it. + with self.assertRaisesRegex(IllegalStateError, "Unable to connect to any cluster"): + await self.start_client_with(f"{file_name}.pem", "127.0.0.1:5701") + + async def test_hostname_verification_with_no_entry(self): + # No entry either in the SAN or CN. No way to verify hostname. 
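+ # With ssl_check_hostname enabled, the reactor passes server_hostname to
+ # loop.create_connection, so certificate verification is expected to fail
+ # during the TLS handshake for both addresses below.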
+ file_name = "tls-host-no-entry" + self.start_member_with(f"{file_name}.p12") + with self.assertRaisesRegex(IllegalStateError, "Unable to connect to any cluster"): + await self.start_client_with(f"{file_name}.pem", "localhost:5701") + with self.assertRaisesRegex(IllegalStateError, "Unable to connect to any cluster"): + await self.start_client_with(f"{file_name}.pem", "127.0.0.1:5701") + + async def test_hostname_verification_disabled(self): + # When hostname verification is disabled, the scenarious that + # would fail in `test_hostname_verification_with_no_entry` will + # no longer fail, showing that it is working as expected. + file_name = "tls-host-no-entry" + self.start_member_with(f"{file_name}.p12") + await self.start_client_with(f"{file_name}.pem", "localhost:5701", check_hostname=False) + await self.start_client_with(f"{file_name}.pem", "127.0.0.1:5701", check_hostname=False) + + async def start_client_with( + self, + truststore_name: str, + member_address: str, + *, + check_hostname=True, + ) -> HazelcastClient: + return await self.create_client( + { + "cluster_name": self.cluster.id, + "cluster_members": [member_address], + "ssl_enabled": True, + "ssl_protocol": SSLProtocol.TLSv1_2, + "ssl_cafile": get_abs_path(current_directory, truststore_name), + "ssl_check_hostname": check_hostname, + "cluster_connect_timeout": 0, + } + ) + + def start_member_with(self, keystore_name: str) -> None: + config = MEMBER_CONFIG % get_abs_path(current_directory, keystore_name) + self.cluster = self.create_cluster(self.rc, config) + self.cluster.start_member() From 27184785945786049b03d7b83e9bd03e85edf038 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 1 Oct 2025 09:41:44 +0300 Subject: [PATCH 24/51] black --- .../hostname_verification/ssl_hostname_verification_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py b/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py index c47f3647d4..87fff817ec 100644 --- a/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py +++ b/tests/integration/asyncio/ssl_tests/hostname_verification/ssl_hostname_verification_test.py @@ -11,7 +11,9 @@ from tests.util import compare_client_version, get_abs_path current_directory = os.path.abspath( - os.path.join(os.path.dirname(__file__), "../../../backward_compatible/ssl_tests/hostname_verification") + os.path.join( + os.path.dirname(__file__), "../../../backward_compatible/ssl_tests/hostname_verification" + ) ) MEMBER_CONFIG = """ From 58783dc4418102e38c1daa15c3ac13df119da746 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 1 Oct 2025 14:52:04 +0300 Subject: [PATCH 25/51] Ported more integration tests - Fixed a bug where an asyncio Lock was tried to be acquired without a release - Store asyncio tasks in order not to lose them - Removed start/shutdown from AsyncioReactor --- hazelcast/asyncio/client.py | 2 - hazelcast/internal/asyncio_connection.py | 39 ++- hazelcast/internal/asyncio_reactor.py | 16 +- tests/integration/asyncio/cluster_test.py | 317 ++++++++++++++++++ .../asyncio/connection_strategy_test.py | 101 ++++++ 5 files changed, 447 insertions(+), 28 deletions(-) create mode 100644 tests/integration/asyncio/cluster_test.py create mode 100644 tests/integration/asyncio/connection_strategy_test.py diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 0f6db252fe..08e3a7aeb4 100644 --- 
a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -162,7 +162,6 @@ def _init_context(self): ) async def _start(self): - self._reactor.start() try: self._internal_lifecycle_service.start() self._invocation_service.start() @@ -250,7 +249,6 @@ async def shutdown(self) -> None: await self._connection_manager.shutdown() self._invocation_service.shutdown() self._statistics.shutdown() - self._reactor.shutdown() self._internal_lifecycle_service.fire_lifecycle_event(LifecycleState.SHUTDOWN) @property diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index 8db67caf81..109771f78a 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -185,6 +185,9 @@ def __init__( self._use_public_ip = ( isinstance(address_provider, DefaultAddressProvider) and config.use_public_ip ) + # asyncio tasks are weakly referenced + # storing tasks here in order not to lose them midway + self._tasks = set() def add_listener(self, on_connection_opened=None, on_connection_closed=None): """Registers a ConnectionListener. @@ -315,22 +318,21 @@ async def on_connection_close(self, closed_connection): disconnected = False removed = False trigger_reconnection = False - async with self._lock: - connection = self.active_connections.get(remote_uuid, None) - if connection == closed_connection: - self.active_connections.pop(remote_uuid, None) - removed = True - _logger.info( - "Removed connection to %s:%s, connection: %s", - remote_address, - remote_uuid, - connection, - ) + connection = self.active_connections.get(remote_uuid, None) + if connection == closed_connection: + self.active_connections.pop(remote_uuid, None) + removed = True + _logger.info( + "Removed connection to %s:%s, connection: %s", + remote_address, + remote_uuid, + connection, + ) - if not self.active_connections: - trigger_reconnection = True - if self._client_state == ClientState.INITIALIZED_ON_CLUSTER: - disconnected = True + if not self.active_connections: + trigger_reconnection = True + if self._client_state == ClientState.INITIALIZED_ON_CLUSTER: + disconnected = True if disconnected: self._lifecycle_service.fire_lifecycle_event(LifecycleState.DISCONNECTED) @@ -813,6 +815,9 @@ def __init__(self, connection_manager, client, config, reactor, invocation_servi self._heartbeat_timeout = config.heartbeat_timeout self._heartbeat_interval = config.heartbeat_interval self._heartbeat_task: asyncio.Task | None = None + # asyncio tasks are weakly referenced + # storing tasks here in order not to lose them midway + self._tasks = set() def start(self): """Starts sending periodic HeartBeat operations.""" @@ -852,7 +857,9 @@ async def _check_connection(self, now, connection): if (now - connection.last_write_time) > self._heartbeat_interval: request = client_ping_codec.encode_request() invocation = Invocation(request, connection=connection, urgent=True) - asyncio.create_task(self._invocation_service.ainvoke(invocation)) + task = asyncio.create_task(self._invocation_service.ainvoke(invocation)) + self._tasks.add(task) + task.add_done_callback(self._tasks.discard) _frame_header = struct.Struct(" + hot-restart-test + + %s + + + %s + + """ % ( + port, + self.tmp_dir, + ) + + +_SERVER_PORT = 5701 +_CLIENT_PORT = 5702 +_SERVER_WITH_CLIENT_ENDPOINT = """ + + + + %s + + + %s + + + +""" % ( + _SERVER_PORT, + _CLIENT_PORT, +) + + +@unittest.skipIf( + compare_client_version("5.0") < 0, "Tests the features added in 5.0 version of the client" +) +class 
AdvancedNetworkConfigTest(SingleMemberTestCase): + @classmethod + def configure_cluster(cls): + return _SERVER_WITH_CLIENT_ENDPOINT + + @classmethod + def configure_client(cls, config): + config["cluster_members"] = ["localhost:%s" % _CLIENT_PORT] + config["cluster_name"] = cls.cluster.id + return config + + def test_member_list(self): + members = self.client.cluster_service.get_members() + self.assertEqual(1, len(members)) + member = members[0] + # Make sure member address is assigned to client endpoint port + self.assertEqual(_CLIENT_PORT, member.address.port) + + # Make sure there are mappings for CLIENT and MEMBER endpoints + self.assertEqual(2, len(member.address_map)) + self.assertEqual( + _SERVER_PORT, member.address_map.get(EndpointQualifier(ProtocolType.MEMBER, None)).port + ) + self.assertEqual( + _CLIENT_PORT, + member.address_map.get(EndpointQualifier(ProtocolType.CLIENT, None)).port, + ) diff --git a/tests/integration/asyncio/connection_strategy_test.py b/tests/integration/asyncio/connection_strategy_test.py new file mode 100644 index 0000000000..ccd51d267d --- /dev/null +++ b/tests/integration/asyncio/connection_strategy_test.py @@ -0,0 +1,101 @@ +import unittest + +from hazelcast.asyncio import HazelcastClient +from hazelcast.config import ReconnectMode +from hazelcast.errors import ClientOfflineError, HazelcastClientNotActiveError +from hazelcast.lifecycle import LifecycleState +from tests.integration.asyncio.base import HazelcastTestCase +from tests.util import random_string + + +class ConnectionStrategyTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + @classmethod + def setUpClass(cls): + cls.rc = cls.create_rc() + + @classmethod + def tearDownClass(cls): + cls.rc.exit() + + def setUp(self): + self.client = None + self.cluster = None + + async def asyncTearDown(self): + if self.client: + await self.client.shutdown() + self.client = None + if self.cluster: + self.rc.terminateCluster(self.cluster.id) + self.cluster = None + + async def test_off_reconnect_mode(self): + self.cluster = self.rc.createCluster(None, None) + member = self.rc.startMember(self.cluster.id) + + def collector(): + events = [] + + def on_state_change(event): + if event == LifecycleState.SHUTDOWN: + events.append(event) + + on_state_change.events = events + return on_state_change + + event_collector = collector() + + self.client = await HazelcastClient.create_and_start( + cluster_members=["localhost:5701"], + cluster_name=self.cluster.id, + reconnect_mode=ReconnectMode.OFF, + lifecycle_listeners=[event_collector], + ) + m = await self.client.get_map(random_string()) + # no exception at this point + await m.put(1, 1) + self.rc.shutdownMember(self.cluster.id, member.uuid) + await self.assertTrueEventually(lambda: self.assertEqual(1, len(event_collector.events))) + with self.assertRaises(HazelcastClientNotActiveError): + await m.put(1, 1) + + async def test_async_reconnect_mode(self): + import logging + + logging.basicConfig(level=logging.DEBUG) + self.cluster = self.rc.createCluster(None, None) + member = self.rc.startMember(self.cluster.id) + + def collector(event_type): + events = [] + + def on_state_change(event): + if event == event_type: + events.append(event) + + on_state_change.events = events + return on_state_change + + disconnected_collector = collector(LifecycleState.DISCONNECTED) + self.client = await HazelcastClient.create_and_start( + cluster_members=["localhost:5701"], + cluster_name=self.cluster.id, + reconnect_mode=ReconnectMode.ASYNC, + 
lifecycle_listeners=[disconnected_collector], + ) + m = await self.client.get_map(random_string()) + # no exception at this point + await m.put(1, 1) + self.rc.shutdownMember(self.cluster.id, member.uuid) + await self.assertTrueEventually( + lambda: self.assertEqual(1, len(disconnected_collector.events)) + ) + with self.assertRaises(ClientOfflineError): + await m.put(1, 1) + connected_collector = collector(LifecycleState.CONNECTED) + self.client.lifecycle_service.add_listener(connected_collector) + self.rc.startMember(self.cluster.id) + await self.assertTrueEventually( + lambda: self.assertEqual(1, len(connected_collector.events)) + ) + await m.put(1, 1) From 3cf99826047baa8b34ad2595ed090518d383c37e Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Thu, 2 Oct 2025 09:17:00 +0300 Subject: [PATCH 26/51] Ported hazelcast json value test --- .../asyncio/hazelcast_json_value_test.py | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 tests/integration/asyncio/hazelcast_json_value_test.py diff --git a/tests/integration/asyncio/hazelcast_json_value_test.py b/tests/integration/asyncio/hazelcast_json_value_test.py new file mode 100644 index 0000000000..2193d4d2b3 --- /dev/null +++ b/tests/integration/asyncio/hazelcast_json_value_test.py @@ -0,0 +1,74 @@ +from hazelcast.core import HazelcastJsonValue +from hazelcast.predicate import greater, equal +from tests.integration.asyncio.base import SingleMemberTestCase + + +class HazelcastJsonValueWithMapTest(SingleMemberTestCase): + @classmethod + def setUpClass(cls): + super(HazelcastJsonValueWithMapTest, cls).setUpClass() + cls.json_str = '{"key": "value"}' + cls.json_obj = {"key": "value"} + + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map("json-test") + + async def asyncTearDown(self): + await self.map.destroy() + + async def test_storing_hazelcast_json_value_as_key(self): + json_value = HazelcastJsonValue(self.json_str) + await self.map.put(json_value, 0) + self.assertEqual(0, await self.map.get(json_value)) + + async def test_storing_hazelcast_json_value_as_value(self): + json_value = HazelcastJsonValue(self.json_str) + await self.map.put(0, json_value) + self.assertEqual(json_value.to_string(), (await self.map.get(0)).to_string()) + + async def test_storing_hazelcast_json_value_with_invalid_str(self): + json_value = HazelcastJsonValue('{"a') + await self.map.put(0, json_value) + self.assertEqual(json_value.to_string(), (await self.map.get(0)).to_string()) + + async def test_querying_over_keys_with_hazelcast_json_value(self): + json_value = HazelcastJsonValue({"a": 1}) + json_value2 = HazelcastJsonValue({"a": 3}) + await self.map.put(json_value, 1) + await self.map.put(json_value2, 2) + results = await self.map.key_set(greater("__key.a", 2)) + self.assertEqual(1, len(results)) + self.assertEqual(json_value2.to_string(), results[0].to_string()) + + async def test_querying_nested_attr_over_keys_with_hazelcast_json_value(self): + json_value = HazelcastJsonValue({"a": 1, "b": {"c": "d"}}) + json_value2 = HazelcastJsonValue({"a": 2, "b": {"c": "e"}}) + await self.map.put(json_value, 1) + await self.map.put(json_value2, 2) + results = await self.map.key_set(equal("__key.b.c", "d")) + self.assertEqual(1, len(results)) + self.assertEqual(json_value.to_string(), results[0].to_string()) + + async def test_querying_over_values_with_hazelcast_json_value(self): + json_value = 
HazelcastJsonValue({"a": 1}) + json_value2 = HazelcastJsonValue({"a": 3}) + await self.map.put(1, json_value) + await self.map.put(2, json_value2) + results = await self.map.values(greater("a", 2)) + self.assertEqual(1, len(results)) + self.assertEqual(json_value2.to_string(), results[0].to_string()) + + async def test_querying_nested_attr_over_values_with_hazelcast_json_value(self): + json_value = HazelcastJsonValue({"a": 1, "b": {"c": "d"}}) + json_value2 = HazelcastJsonValue({"a": 2, "b": {"c": "e"}}) + await self.map.put(1, json_value) + await self.map.put(2, json_value2) + results = await self.map.values(equal("b.c", "d")) + self.assertEqual(1, len(results)) + self.assertEqual(json_value.to_string(), results[0].to_string()) From c1798ead8576406d5e0102c0739a1d3f62b59df0 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Thu, 2 Oct 2025 09:47:24 +0300 Subject: [PATCH 27/51] Ported heart beat test --- tests/integration/asyncio/heartbeat_test.py | 99 +++++++++++++++++++++ tests/integration/asyncio/util.py | 25 ++++++ 2 files changed, 124 insertions(+) create mode 100644 tests/integration/asyncio/heartbeat_test.py diff --git a/tests/integration/asyncio/heartbeat_test.py b/tests/integration/asyncio/heartbeat_test.py new file mode 100644 index 0000000000..aea18055b6 --- /dev/null +++ b/tests/integration/asyncio/heartbeat_test.py @@ -0,0 +1,99 @@ +import asyncio +import threading +import unittest + +from hazelcast.asyncio import HazelcastClient +from hazelcast.core import Address +from tests.integration.asyncio.base import HazelcastTestCase +from tests.integration.asyncio.util import open_connection_to_address, wait_for_partition_table + + +class HeartbeatTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + rc = None + + @classmethod + def setUpClass(cls): + cls.rc = cls.create_rc() + + @classmethod + def tearDownClass(cls): + cls.rc.exit() + + async def asyncSetUp(self): + self.cluster = self.create_cluster(self.rc) + self.member = self.rc.startMember(self.cluster.id) + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + heartbeat_interval=0.5, + heartbeat_timeout=2, + ) + + async def asyncTearDown(self): + await self.client.shutdown() + self.rc.shutdownCluster(self.cluster.id) + + async def test_heartbeat_stopped_and_restored(self): + member2 = self.rc.startMember(self.cluster.id) + addr = Address(member2.host, member2.port) + await wait_for_partition_table(self.client) + await open_connection_to_address(self.client, member2.uuid) + + def connection_collector(): + connections = [] + + def collector(c, *_): + connections.append(c) + + collector.connections = connections + return collector + + connection_added_collector = connection_collector() + connection_removed_collector = connection_collector() + self.client._connection_manager.add_listener( + connection_added_collector, connection_removed_collector + ) + assertion_succeeded = False + + def run(): + nonlocal assertion_succeeded + # It is possible for client to override the set last_read_time + # of the connection, in case of the periodically sent heartbeat + # requests getting responses, right after we try to set a new + # value to it, before the next iteration of the heartbeat manager. + # In this case, the connection won't be closed, and the test would + # fail. To avoid it, we will try multiple times. 
+ for i in range(10): + if assertion_succeeded: + # We have successfully simulated heartbeat loss + return + + for connection in self.client._connection_manager.active_connections.values(): + if connection.remote_address == addr: + connection.last_read_time -= 2 + break + + asyncio.sleep((i + 1) * 0.1) + + simulation_thread = threading.Thread(target=run) + simulation_thread.start() + + async def assert_heartbeat_stopped_and_restored(): + nonlocal assertion_succeeded + self.assertGreaterEqual(len(connection_added_collector.connections), 1) + self.assertGreaterEqual(len(connection_removed_collector.connections), 1) + + stopped_connection = connection_removed_collector.connections[0] + restored_connection = connection_added_collector.connections[0] + + self.assertEqual( + stopped_connection.connected_address, + Address(member2.host, member2.port), + ) + self.assertEqual( + restored_connection.connected_address, + Address(member2.host, member2.port), + ) + assertion_succeeded = True + + await self.assertTrueEventually(assert_heartbeat_stopped_and_restored) + simulation_thread.join() diff --git a/tests/integration/asyncio/util.py b/tests/integration/asyncio/util.py index e101a58103..fe6fdecb92 100644 --- a/tests/integration/asyncio/util.py +++ b/tests/integration/asyncio/util.py @@ -1,6 +1,31 @@ +from uuid import uuid4 + +import asyncio + + async def fill_map(map, size=10, key_prefix="key", value_prefix="val"): entries = dict() for i in range(size): entries[key_prefix + str(i)] = value_prefix + str(i) await map.put_all(entries) return entries + +async def open_connection_to_address(client, uuid): + key = generate_key_owned_by_instance(client, uuid) + m = await client.get_map(str(uuid4())) + await m.put(key, 0) + await m.destroy() + +def generate_key_owned_by_instance(client, uuid): + while True: + key = str(uuid4()) + partition_id = client.partition_service.get_partition_id(key) + owner = str(client.partition_service.get_partition_owner(partition_id)) + if owner == uuid: + return key + +async def wait_for_partition_table(client): + m = await client.get_map(str(uuid4())) + while not client.partition_service.get_partition_owner(0): + await m.put(str(uuid4()), 0) + await asyncio.sleep(0.1) From e92936aec7313736de27096672753368c5335054 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 20 Oct 2025 12:01:10 +0300 Subject: [PATCH 28/51] Ported more tests --- hazelcast/internal/asyncio_reactor.py | 1 + tests/integration/asyncio/heartbeat_test.py | 4 +- tests/integration/asyncio/invocation_test.py | 62 ++++++++++++ tests/integration/asyncio/lifecycle_test.py | 101 +++++++++++++++++++ 4 files changed, 167 insertions(+), 1 deletion(-) create mode 100644 tests/integration/asyncio/invocation_test.py create mode 100644 tests/integration/asyncio/lifecycle_test.py diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index 9d169897fa..37552d9bd2 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -61,6 +61,7 @@ def __init__( self._address = address self._config = config self._proto = None + self.connected_address = address @classmethod async def create_and_connect( diff --git a/tests/integration/asyncio/heartbeat_test.py b/tests/integration/asyncio/heartbeat_test.py index aea18055b6..3f9015363a 100644 --- a/tests/integration/asyncio/heartbeat_test.py +++ b/tests/integration/asyncio/heartbeat_test.py @@ -34,6 +34,8 @@ async def asyncTearDown(self): async def test_heartbeat_stopped_and_restored(self): member2 = 
self.rc.startMember(self.cluster.id) + # TODO: remove this + await asyncio.sleep(1) addr = Address(member2.host, member2.port) await wait_for_partition_table(self.client) await open_connection_to_address(self.client, member2.uuid) @@ -67,7 +69,7 @@ def run(): # We have successfully simulated heartbeat loss return - for connection in self.client._connection_manager.active_connections.values(): + for connection in list(self.client._connection_manager.active_connections.values()): if connection.remote_address == addr: connection.last_read_time -= 2 break diff --git a/tests/integration/asyncio/invocation_test.py b/tests/integration/asyncio/invocation_test.py new file mode 100644 index 0000000000..ae84d13e86 --- /dev/null +++ b/tests/integration/asyncio/invocation_test.py @@ -0,0 +1,62 @@ +import asyncio +import time +import unittest + +from mock import MagicMock + +from hazelcast.asyncio import HazelcastClient +from hazelcast.errors import OperationTimeoutError +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.protocol.client_message import OutboundMessage +from hazelcast.serialization import LE_INT +from tests.integration.asyncio.base import HazelcastTestCase + + +class InvocationTimeoutTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + @classmethod + def setUpClass(cls): + cls.rc = cls.create_rc() + cls.cluster = cls.create_cluster(cls.rc, None) + cls.member = cls.cluster.start_member() + + @classmethod + def tearDownClass(cls): + cls.rc.terminateCluster(cls.cluster.id) + cls.rc.exit() + + async def asyncSetUp(self): + self.client = await HazelcastClient.create_and_start(cluster_name=self.cluster.id, invocation_timeout=1) + + async def asyncTearDown(self): + await self.client.shutdown() + + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + config["invocation_timeout"] = 1 + return config + + async def test_invocation_timeout(self): + request = OutboundMessage(bytearray(22), True) + invocation_service = self.client._invocation_service + invocation = Invocation(request, partition_id=1) + + def mock(*_): + time.sleep(2) + return False + + invocation_service._invoke_on_partition_owner = MagicMock(side_effect=mock) + invocation_service._invoke_on_random_connection = MagicMock(return_value=False) + invocation_service.invoke(invocation) + with self.assertRaises(OperationTimeoutError): + await invocation.future + + async def test_invocation_not_timed_out_when_there_is_no_exception(self): + buf = bytearray(22) + LE_INT.pack_into(buf, 0, 22) + request = OutboundMessage(buf, True) + invocation_service = self.client._invocation_service + invocation = Invocation(request) + invocation_service.invoke(invocation) + await asyncio.sleep(2) + self.assertFalse(invocation.future.done()) + self.assertEqual(1, len(invocation_service._pending)) diff --git a/tests/integration/asyncio/lifecycle_test.py b/tests/integration/asyncio/lifecycle_test.py new file mode 100644 index 0000000000..4efa014053 --- /dev/null +++ b/tests/integration/asyncio/lifecycle_test.py @@ -0,0 +1,101 @@ +import unittest + +from hazelcast.lifecycle import LifecycleState +from tests.integration.asyncio.base import HazelcastTestCase +from tests.util import event_collector + + +class LifecycleTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + rc = None + + def setUp(self): + self.rc = self.create_rc() + self.cluster = self.create_cluster(self.rc) + + async def asyncTearDown(self): + await self.shutdown_all_clients() + self.rc.exit() + + async def 
test_lifecycle_listener_receives_events_in_order(self): + collector = event_collector() + self.cluster.start_member() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + "lifecycle_listeners": [ + collector, + ], + } + ) + await client.shutdown() + self.assertEqual( + collector.events, + [ + LifecycleState.STARTING, + LifecycleState.STARTED, + LifecycleState.CONNECTED, + LifecycleState.SHUTTING_DOWN, + LifecycleState.DISCONNECTED, + LifecycleState.SHUTDOWN, + ], + ) + + async def test_lifecycle_listener_receives_events_in_order_after_startup(self): + self.cluster.start_member() + collector = event_collector() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + } + ) + client.lifecycle_service.add_listener(collector) + await client.shutdown() + self.assertEqual( + collector.events, + [LifecycleState.SHUTTING_DOWN, LifecycleState.DISCONNECTED, LifecycleState.SHUTDOWN], + ) + + async def test_lifecycle_listener_receives_disconnected_event(self): + member = self.cluster.start_member() + collector = event_collector() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + } + ) + client.lifecycle_service.add_listener(collector) + member.shutdown() + + def assertion(): + self.assertEqual(collector.events, [LifecycleState.DISCONNECTED]) + + await self.assertTrueEventually(assertion) + + await client.shutdown() + + async def test_remove_lifecycle_listener(self): + collector = event_collector() + self.cluster.start_member() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + } + ) + registration_id = client.lifecycle_service.add_listener(collector) + client.lifecycle_service.remove_listener(registration_id) + await client.shutdown() + self.assertEqual(collector.events, []) + + async def test_exception_in_listener(self): + def listener(_): + raise RuntimeError("error") + + self.cluster.start_member() + await self.create_client( + { + "cluster_name": self.cluster.id, + "lifecycle_listeners": [ + listener, + ], + } + ) From 120a58aca365d25001e650e169339e7a5b05b2d2 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 22 Oct 2025 13:55:02 +0300 Subject: [PATCH 29/51] black --- tests/integration/asyncio/invocation_test.py | 4 +++- tests/integration/asyncio/util.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/integration/asyncio/invocation_test.py b/tests/integration/asyncio/invocation_test.py index ae84d13e86..97c95b96fb 100644 --- a/tests/integration/asyncio/invocation_test.py +++ b/tests/integration/asyncio/invocation_test.py @@ -25,7 +25,9 @@ def tearDownClass(cls): cls.rc.exit() async def asyncSetUp(self): - self.client = await HazelcastClient.create_and_start(cluster_name=self.cluster.id, invocation_timeout=1) + self.client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, invocation_timeout=1 + ) async def asyncTearDown(self): await self.client.shutdown() diff --git a/tests/integration/asyncio/util.py b/tests/integration/asyncio/util.py index fe6fdecb92..6a15d9c8ec 100644 --- a/tests/integration/asyncio/util.py +++ b/tests/integration/asyncio/util.py @@ -10,12 +10,14 @@ async def fill_map(map, size=10, key_prefix="key", value_prefix="val"): await map.put_all(entries) return entries + async def open_connection_to_address(client, uuid): key = generate_key_owned_by_instance(client, uuid) m = await client.get_map(str(uuid4())) await m.put(key, 0) await m.destroy() + def generate_key_owned_by_instance(client, uuid): while True: key = str(uuid4()) @@ 
-24,6 +26,7 @@ def generate_key_owned_by_instance(client, uuid): if owner == uuid: return key + async def wait_for_partition_table(client): m = await client.get_map(str(uuid4())) while not client.partition_service.get_partition_owner(0): From 80880b8c77e42bc664f0818e624c46b0f8370955 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 31 Oct 2025 01:15:29 +0300 Subject: [PATCH 30/51] Fixed type hints --- hazelcast/internal/asyncio_reactor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index 37552d9bd2..cd81146d02 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -160,7 +160,7 @@ class HazelcastProtocol(asyncio.BufferedProtocol): def __init__(self, conn: AsyncioConnection): self._conn = conn - self._transport: transports.BaseTransport | None = None + self._transport: transports.Transport | None = None self.start_time: float | None = None self._write_buf = io.BytesIO() self._write_buf_size = 0 @@ -170,7 +170,7 @@ def __init__(self, conn: AsyncioConnection): # storing tasks here in order not to lose them midway self._tasks: set = set() - def connection_made(self, transport: transports.BaseTransport): + def connection_made(self, transport: transports.Transport): self._transport = transport self.start_time = time.time() self.write(self.PROTOCOL_STARTER) From 6431acc8928f9380418a86928028c9741a879cce Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 14 Nov 2025 10:39:58 +0300 Subject: [PATCH 31/51] type hints --- hazelcast/internal/asyncio_reactor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index cd81146d02..37552d9bd2 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -160,7 +160,7 @@ class HazelcastProtocol(asyncio.BufferedProtocol): def __init__(self, conn: AsyncioConnection): self._conn = conn - self._transport: transports.Transport | None = None + self._transport: transports.BaseTransport | None = None self.start_time: float | None = None self._write_buf = io.BytesIO() self._write_buf_size = 0 @@ -170,7 +170,7 @@ def __init__(self, conn: AsyncioConnection): # storing tasks here in order not to lose them midway self._tasks: set = set() - def connection_made(self, transport: transports.Transport): + def connection_made(self, transport: transports.BaseTransport): self._transport = transport self.start_time = time.time() self.write(self.PROTOCOL_STARTER) From e9a9b5e60af91d0760e51e24545ff07afed37603 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 17 Nov 2025 12:38:53 +0300 Subject: [PATCH 32/51] Ported more tests --- tests/integration/asyncio/listener_test.py | 112 ++++ tests/integration/asyncio/predicate_test.py | 571 ++++++++++++++++++ tests/integration/asyncio/proxy/map_test.py | 1 - tests/integration/asyncio/reconnect_test.py | 268 ++++++++ tests/integration/asyncio/shutdown_test.py | 56 ++ .../asyncio/smart_listener_test.py | 45 ++ 6 files changed, 1052 insertions(+), 1 deletion(-) create mode 100644 tests/integration/asyncio/listener_test.py create mode 100644 tests/integration/asyncio/predicate_test.py create mode 100644 tests/integration/asyncio/reconnect_test.py create mode 100644 tests/integration/asyncio/shutdown_test.py create mode 100644 tests/integration/asyncio/smart_listener_test.py diff --git a/tests/integration/asyncio/listener_test.py 
b/tests/integration/asyncio/listener_test.py new file mode 100644 index 0000000000..758ed7244e --- /dev/null +++ b/tests/integration/asyncio/listener_test.py @@ -0,0 +1,112 @@ +import asyncio +import unittest + +from parameterized import parameterized + +from tests.integration.asyncio.base import HazelcastTestCase +from tests.integration.asyncio.util import ( + generate_key_owned_by_instance, + wait_for_partition_table, +) +from tests.util import ( + random_string, + event_collector, +) + +LISTENER_TYPES = [ + ( + "smart", + True, + ), + ( + "non-smart", + False, + ), +] + + +class ListenerRemoveMemberTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + def setUp(self): + self.rc = self.create_rc() + self.cluster = self.create_cluster(self.rc, None) + self.m1 = self.cluster.start_member() + self.m2 = self.cluster.start_member() + self.client_config = { + "cluster_name": self.cluster.id, + "heartbeat_interval": 1.0, + } + self.collector = event_collector() + + async def asyncTearDown(self): + await self.shutdown_all_clients() + self.rc.terminateCluster(self.cluster.id) + self.rc.exit() + + @parameterized.expand(LISTENER_TYPES) + async def test_remove_member(self, _, is_smart): + self.client_config["smart_routing"] = is_smart + client = await self.create_client(self.client_config) + await wait_for_partition_table(client) + key_m1 = generate_key_owned_by_instance(client, self.m1.uuid) + random_map = await client.get_map(random_string()).blocking() + random_map.add_entry_listener(added_func=self.collector) + self.m1.shutdown() + random_map.put(key_m1, "value2") + + def assert_event(): + self.assertEqual(1, len(self.collector.events)) + + await self.assertTrueEventually(assert_event) + + +class ListenerAddMemberTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + def setUp(self): + self.rc = self.create_rc() + self.cluster = self.create_cluster(self.rc, None) + self.m1 = self.cluster.start_member() + self.client_config = { + "cluster_name": self.cluster.id, + } + self.collector = event_collector() + + async def asyncTearDown(self): + await self.shutdown_all_clients() + self.rc.terminateCluster(self.cluster.id) + self.rc.exit() + + @parameterized.expand(LISTENER_TYPES) + async def test_add_member(self, _, is_smart): + self.client_config["smart_routing"] = is_smart + client = await self.create_client(self.client_config) + random_map = await client.get_map(random_string()).blocking() + random_map.add_entry_listener(added_func=self.collector, updated_func=self.collector) + m2 = self.cluster.start_member() + await wait_for_partition_table(client) + key_m2 = generate_key_owned_by_instance(client, m2.uuid) + + assertion_succeeded = False + + async def run(): + nonlocal assertion_succeeded + # When a new connection is added, we add the existing + # listeners to it, but we do it non-blocking. So, it might + # be the case that, the listener registration request is + # sent to the new member, but not completed yet. + # So, we might not get an event for the put. To avoid this, + # we will put multiple times. 
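+ # run() is scheduled with asyncio.create_task below, so the puts run
+ # concurrently with the assertTrueEventually check; assertion_succeeded
+ # is set by assert_event and stops this loop once an event arrives.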
+ for i in range(10): + if assertion_succeeded: + # We have successfully got an event + return + + await random_map.put(key_m2, f"value-{i}") + await asyncio.sleep((i + 1) * 0.1) + + asyncio.create_task(run()) + + def assert_event(): + nonlocal assertion_succeeded + self.assertGreaterEqual(len(self.collector.events), 1) + assertion_succeeded = True + + await self.assertTrueEventually(assert_event) diff --git a/tests/integration/asyncio/predicate_test.py b/tests/integration/asyncio/predicate_test.py new file mode 100644 index 0000000000..f872548037 --- /dev/null +++ b/tests/integration/asyncio/predicate_test.py @@ -0,0 +1,571 @@ +import os +import unittest + +from hazelcast.predicate import ( + equal, + and_, + between, + less, + less_or_equal, + greater, + greater_or_equal, + or_, + not_equal, + not_, + like, + ilike, + regex, + sql, + true, + false, + in_, + instance_of, + paging, +) +from hazelcast.serialization.api import Portable, IdentifiedDataSerializable +from hazelcast.util import IterationType +from tests.integration.asyncio.base import SingleMemberTestCase, HazelcastTestCase +from tests.integration.backward_compatible.util import ( + write_string_to_writer, + read_string_from_reader, +) +from tests.util import random_string, get_abs_path +from hazelcast.asyncio import HazelcastClient + + +class PredicateTest(SingleMemberTestCase): + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) + + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() + + async def fill_map(self, count=10): + m = {"key-%d" % x: "value-%d" % x for x in range(0, count)} + await self.map.put_all(m) + return m + + async def fill_map_numeric(self, count=100): + m = {n: n for n in range(count)} + await self.map.put_all(m) + + async def test_key_set(self): + await self.fill_map() + key_set = await self.map.key_set() + list(key_set) + key_set_list = list(key_set) + assert key_set_list[0] + + async def test_sql(self): + await self.fill_map() + predicate = sql("this == 'value-1'") + self.assertCountEqual(await self.map.key_set(predicate), ["key-1"]) + + async def test_and(self): + await self.fill_map() + predicate = and_(equal("this", "value-1"), equal("this", "value-2")) + self.assertCountEqual(await self.map.key_set(predicate), []) + + async def test_or(self): + await self.fill_map() + predicate = or_(equal("this", "value-1"), equal("this", "value-2")) + self.assertCountEqual(await self.map.key_set(predicate), ["key-1", "key-2"]) + + async def test_not(self): + await self.fill_map(count=3) + predicate = not_(equal("this", "value-1")) + self.assertCountEqual(await self.map.key_set(predicate), ["key-0", "key-2"]) + + async def test_between(self): + await self.fill_map_numeric() + predicate = between("this", 1, 20) + self.assertCountEqual(await self.map.key_set(predicate), list(range(1, 21))) + + async def test_equal(self): + await self.fill_map() + predicate = equal("this", "value-1") + self.assertCountEqual(await self.map.key_set(predicate), ["key-1"]) + + async def test_not_equal(self): + await self.fill_map(count=3) + predicate = not_equal("this", "value-1") + self.assertCountEqual(await self.map.key_set(predicate), ["key-0", "key-2"]) + + async def test_in(self): + await self.fill_map_numeric(count=10) + predicate = in_("this", 1, 5, 7) + self.assertCountEqual(await self.map.key_set(predicate), [1, 5, 7]) + 
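+ # The numeric predicate tests below run against fill_map_numeric, which
+ # stores each integer as both key and value, so a predicate on `this`
+ # selects the matching keys as well.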
+    async def test_less_than(self):
+        await self.fill_map_numeric()
+        predicate = less("this", 10)
+        self.assertCountEqual(await self.map.key_set(predicate), list(range(0, 10)))
+
+    async def test_less_than_or_equal(self):
+        await self.fill_map_numeric()
+        predicate = less_or_equal("this", 10)
+        self.assertCountEqual(await self.map.key_set(predicate), list(range(0, 11)))
+
+    async def test_greater_than(self):
+        await self.fill_map_numeric()
+        predicate = greater("this", 10)
+        self.assertCountEqual(await self.map.key_set(predicate), list(range(11, 100)))
+
+    async def test_greater_than_or_equal(self):
+        await self.fill_map_numeric()
+        predicate = greater_or_equal("this", 10)
+        self.assertCountEqual(await self.map.key_set(predicate), list(range(10, 100)))
+
+    async def test_like(self):
+        await self.map.put("key-1", "a_value")
+        await self.map.put("key-2", "b_value")
+        await self.map.put("key-3", "aa_value")
+        await self.map.put("key-4", "AA_value")
+        predicate = like("this", "a%")
+        self.assertCountEqual(await self.map.key_set(predicate), ["key-1", "key-3"])
+
+    async def test_ilike(self):
+        await self.map.put("key-1", "a_value")
+        await self.map.put("key-2", "b_value")
+        await self.map.put("key-3", "AA_value")
+        predicate = ilike("this", "a%")
+        self.assertCountEqual(await self.map.key_set(predicate), ["key-1", "key-3"])
+
+    async def test_regex(self):
+        await self.map.put("key-1", "car")
+        await self.map.put("key-2", "cry")
+        await self.map.put("key-3", "giraffe")
+        predicate = regex("this", "c[ar].*")
+        self.assertCountEqual(await self.map.key_set(predicate), ["key-1", "key-2"])
+
+    async def test_instance_of(self):
+        await self.map.put("key-1", True)
+        await self.map.put("key-2", 5)
+        await self.map.put("key-3", "str")
+        predicate = instance_of("java.lang.Boolean")
+        self.assertCountEqual(await self.map.key_set(predicate), ["key-1"])
+
+    async def test_true(self):
+        m = await self.fill_map()
+        predicate = true()
+        self.assertCountEqual(await self.map.key_set(predicate), list(m.keys()))
+
+    async def test_false(self):
+        await self.fill_map()
+        predicate = false()
+        self.assertCountEqual(await self.map.key_set(predicate), [])
+
+    async def test_paging(self):
+        await self.fill_map_numeric()
+        predicate = paging(less("this", 4), 2)
+        self.assertCountEqual([0, 1], await self.map.key_set(predicate))
+        predicate.next_page()
+        self.assertCountEqual([2, 3], await self.map.key_set(predicate))
+        predicate.next_page()
+        self.assertCountEqual([], await self.map.key_set(predicate))
+
+
+class SimplePortable(Portable):
+    def __init__(self, field=None):
+        self.field = field
+
+    def write_portable(self, writer):
+        writer.write_int("field", self.field)
+
+    def read_portable(self, reader):
+        self.field = reader.read_int("field")
+
+    def get_factory_id(self):
+        return 1
+
+    def get_class_id(self):
+        return 1
+
+
+class PredicatePortableTest(SingleMemberTestCase):
+    @classmethod
+    def configure_client(cls, config):
+        config["cluster_name"] = cls.cluster.id
+        config["portable_factories"] = {1: {1: SimplePortable}}
+        return config
+
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.map = await self.client.get_map(random_string())
+
+    async def asyncTearDown(self):
+        await self.map.destroy()
+        await super().asyncTearDown()
+
+    async def fill_map(self, count=1000):
+        m = {x: SimplePortable(x) for x in range(0, count)}
+        await self.map.put_all(m)
+        return m
+
+    async def test_predicate_portable_key(self):
+        _map = await self.fill_map()
+        map_keys = list(_map.keys())
+        predicate = sql("field >= 900")
+
entries = await self.map.entry_set(predicate) + self.assertEqual(len(entries), 100) + for k, v in entries: + self.assertGreaterEqual(v.field, 900) + self.assertIn(k, map_keys) + + +class NestedPredicatePortableTest(SingleMemberTestCase): + class Body(Portable): + def __init__(self, name=None, limb=None): + self.name = name + self.limb = limb + + def get_class_id(self): + return 1 + + def get_factory_id(self): + return 1 + + def get_class_version(self): + return 15 + + def write_portable(self, writer): + write_string_to_writer(writer, "name", self.name) + writer.write_portable("limb", self.limb) + + def read_portable(self, reader): + self.name = read_string_from_reader(reader, "name") + self.limb = reader.read_portable("limb") + + def __eq__(self, other): + return isinstance(other, self.__class__) and (self.name, self.limb) == ( + other.name, + other.limb, + ) + + class Limb(Portable): + def __init__(self, name=None): + self.name = name + + def get_class_id(self): + return 2 + + def get_factory_id(self): + return 1 + + def get_class_version(self): + return 2 + + def write_portable(self, writer): + write_string_to_writer(writer, "name", self.name) + + def read_portable(self, reader): + self.name = read_string_from_reader(reader, "name") + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.name == other.name + + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + config["portable_factories"] = { + 1: { + 1: NestedPredicatePortableTest.Body, + 2: NestedPredicatePortableTest.Limb, + }, + } + return config + + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(random_string()) + await self.map.put( + 1, NestedPredicatePortableTest.Body("body1", NestedPredicatePortableTest.Limb("hand")) + ) + await self.map.put( + 2, NestedPredicatePortableTest.Body("body2", NestedPredicatePortableTest.Limb("leg")) + ) + + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() + + async def test_adding_indexes(self): + # single-attribute index + await self.map.add_index(attributes=["name"]) + # nested-attribute index + await self.map.add_index(attributes=["limb.name"]) + + async def test_single_attribute_query_portable_predicates(self): + predicate = equal("limb.name", "hand") + values = await self.map.values(predicate) + self.assertEqual(1, len(values)) + self.assertEqual("body1", values[0].name) + + async def test_nested_attribute_query_sql_predicate(self): + predicate = sql("limb.name == 'leg'") + values = await self.map.values(predicate) + self.assertEqual(1, len(values)) + self.assertEqual("body2", values[0].name) + + +class PagingPredicateTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + rc = None + cluster = None + client = None + map = None + + @classmethod + def setUpClass(cls): + cls.rc = cls.create_rc() + cls.cluster = cls.create_cluster(cls.rc, cls.configure_cluster()) + cls.cluster.start_member() + cls.cluster.start_member() + + @classmethod + def tearDownClass(cls): + cls.rc.shutdownCluster(cls.cluster.id) + cls.rc.exit() + + async def asyncSetUp(self): + self.client = await HazelcastClient.create_and_start(cluster_name=self.cluster.id) + self.map = await self.client.get_map(random_string()) + await self.map.clear() + + async def asyncTearDown(self): + await self.map.destroy() + await self.client.shutdown() + + @staticmethod + def configure_cluster(): + current_directory = os.path.dirname(__file__) + dir_path = 
os.path.dirname(current_directory) + path = os.path.join(dir_path, "backward_compatible/proxy/hazelcast.xml") + with open(path, "r") as f: + return f.read() + + def test_with_inner_paging_predicate(self): + predicate = paging(true(), 1) + + with self.assertRaises(TypeError): + paging(predicate, 1) + + def test_with_non_positive_page_size(self): + with self.assertRaises(ValueError): + paging(true(), 0) + + with self.assertRaises(ValueError): + paging(true(), -1) + + def test_previous_page_when_index_is_zero(self): + predicate = paging(true(), 2) + self.assertEqual(0, predicate.previous_page()) + self.assertEqual(0, predicate.previous_page()) + + async def test_entry_set_with_paging_predicate(self): + await self.fill_map(3) + entry_set = await self.map.entry_set(paging(greater_or_equal("this", 2), 1)) + self.assertEqual(len(entry_set), 1) + self.assertEqual(entry_set[0], ("key-2", 2)) + + async def test_key_set_with_paging_predicate(self): + await self.fill_map(3) + key_set = await self.map.key_set(paging(greater_or_equal("this", 2), 1)) + self.assertEqual(len(key_set), 1) + self.assertEqual(key_set[0], "key-2") + + async def test_values_with_paging_predicate(self): + await self.fill_map(3) + values = await self.map.values(paging(greater_or_equal("this", 2), 1)) + self.assertEqual(len(values), 1) + self.assertEqual(values[0], 2) + + async def test_with_none_inner_predicate(self): + await self.fill_map(3) + predicate = paging(None, 10) + self.assertEqual(await self.map.values(predicate), [0, 1, 2]) + + async def test_first_page(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + self.assertEqual(await self.map.values(predicate), [40, 41]) + + async def test_next_page(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.next_page() + self.assertEqual(await self.map.values(predicate), [42, 43]) + + async def test_set_page(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 4 + self.assertEqual(await self.map.values(predicate), [48, 49]) + + def test_get_page(self): + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 4 + self.assertEqual(predicate.page, 4) + + def test_page_size(self): + predicate = paging(greater_or_equal("this", 40), 2) + self.assertEqual(predicate.page_size, 2) + + async def test_previous_page(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 4 + predicate.previous_page() + self.assertEqual(await self.map.values(predicate), [46, 47]) + + async def test_get_4th_then_previous_page(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 4 + await self.map.values(predicate) + predicate.previous_page() + self.assertEqual(await self.map.values(predicate), [46, 47]) + + async def test_get_3rd_then_next_page(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 3 + await self.map.values(predicate) + predicate.next_page() + self.assertEqual(await self.map.values(predicate), [48, 49]) + + async def test_set_nonexistent_page(self): + # Trying to get page 10, which is out of range, should return empty list. 
+ await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 10 + self.assertEqual(await self.map.values(predicate), []) + + async def test_nonexistent_previous_page(self): + # Trying to get previous page while already at first page should return first page. + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.previous_page() + self.assertEqual(await self.map.values(predicate), [40, 41]) + + async def test_nonexistent_next_page(self): + # Trying to get next page while already at last page should return empty list. + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + predicate.page = 4 + predicate.next_page() + self.assertEqual(await self.map.values(predicate), []) + + async def test_get_half_full_last_page(self): + # Page size set to 2, but last page only has 1 element. + await self.fill_map() + predicate = paging(greater_or_equal("this", 41), 2) + predicate.page = 4 + self.assertEqual(await self.map.values(predicate), [49]) + + async def test_reset(self): + await self.fill_map() + predicate = paging(greater_or_equal("this", 40), 2) + self.assertEqual(await self.map.values(predicate), [40, 41]) + predicate.next_page() + self.assertEqual(await self.map.values(predicate), [42, 43]) + predicate.reset() + self.assertEqual(await self.map.values(predicate), [40, 41]) + + async def test_empty_map(self): + # Empty map should return empty list. + predicate = paging(greater_or_equal("this", 30), 2) + self.assertEqual(await self.map.values(predicate), []) + + async def test_equal_values_paging(self): + await self.fill_map() + # keys[50 - 99], values[0 - 49]: + m = {"key-%d" % i: i - 50 for i in range(50, 100)} + await self.map.put_all(m) + predicate = paging(less_or_equal("this", 8), 5) + self.assertEqual(await self.map.values(predicate), [0, 0, 1, 1, 2]) + predicate.next_page() + self.assertEqual(await self.map.values(predicate), [2, 3, 3, 4, 4]) + predicate.next_page() + self.assertEqual(await self.map.values(predicate), [5, 5, 6, 6, 7]) + predicate.next_page() + self.assertEqual(await self.map.values(predicate), [7, 8, 8]) + + async def test_entry_set_with_custom_comparator(self): + m = await self.fill_map() + predicate = paging(less("this", 10), 5, CustomComparator(1, IterationType.KEY)) + + def entries(start, end): + return list( + sorted( + map(lambda k: (k, m[k]), filter(lambda k: start <= m[k] < end, m)), + key=lambda e: e[1], + reverse=True, + ) + ) + + self.assertEqual(entries(5, 10), await self.map.entry_set(predicate)) + predicate.next_page() + self.assertEqual(entries(0, 5), await self.map.entry_set(predicate)) + predicate.next_page() + self.assertEqual([], await self.map.entry_set(predicate)) + + async def test_key_set_with_custom_comparator(self): + m = await self.fill_map() + predicate = paging(less("this", 10), 5, CustomComparator(1, IterationType.KEY)) + keys = list(sorted(m.keys(), key=lambda k: m[k])) + self.assertEqual(keys[9:4:-1], await self.map.key_set(predicate)) + predicate.next_page() + self.assertEqual(keys[4::-1], await self.map.key_set(predicate)) + predicate.next_page() + self.assertEqual([], await self.map.key_set(predicate)) + + async def test_values_with_custom_comparator(self): + m = await self.fill_map() + predicate = paging(less("this", 10), 5, CustomComparator(1, IterationType.KEY)) + values = list(sorted(m.values())) + self.assertEqual(values[9:4:-1], await self.map.values(predicate)) + predicate.next_page() + self.assertEqual(values[4::-1], await 
self.map.values(predicate)) + predicate.next_page() + self.assertEqual([], await self.map.values(predicate)) + + async def fill_map(self, count=50): + m = {"key-%d" % x: x for x in range(count)} + await self.map.put_all(m) + return m + + +class CustomComparator(IdentifiedDataSerializable): + """ + For type: + + - 0 -> lexicographical order + - 1 -> reverse lexicographical + - 2 -> length increasing order + + Iteration type is same as the ``hazelcast.util.IterationType`` + """ + + def __init__(self, order, iteration_type): + self.order = order + self.iteration_type = iteration_type + + def write_data(self, object_data_output): + object_data_output.write_int(self.order) + object_data_output.write_int(self.iteration_type) + + def read_data(self, object_data_input): + pass + + def get_factory_id(self): + return 66 + + def get_class_id(self): + return 2 diff --git a/tests/integration/asyncio/proxy/map_test.py b/tests/integration/asyncio/proxy/map_test.py index b63ae9e0fe..2b4a5d5360 100644 --- a/tests/integration/asyncio/proxy/map_test.py +++ b/tests/integration/asyncio/proxy/map_test.py @@ -40,7 +40,6 @@ from hazelcast.core import HazelcastJsonValue from hazelcast.config import IndexType, IntType -from hazelcast.errors import HazelcastError from hazelcast.predicate import greater_or_equal, less_or_equal, sql, paging, true from hazelcast.internal.asyncio_proxy.map import EntryEventType from hazelcast.serialization.api import IdentifiedDataSerializable diff --git a/tests/integration/asyncio/reconnect_test.py b/tests/integration/asyncio/reconnect_test.py new file mode 100644 index 0000000000..5b5887c42a --- /dev/null +++ b/tests/integration/asyncio/reconnect_test.py @@ -0,0 +1,268 @@ +import asyncio +import sys +import unittest + +from hazelcast.asyncio import HazelcastClient +from hazelcast.errors import HazelcastError, TargetDisconnectedError +from hazelcast.lifecycle import LifecycleState +from hazelcast.util import AtomicInteger +from tests.integration.asyncio.base import HazelcastTestCase +from tests.util import event_collector + + +class ReconnectTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + rc = None + + def setUp(self): + self.rc = self.create_rc() + self.cluster = self.create_cluster(self.rc) + + async def asyncTearDown(self): + await self.shutdown_all_clients() + self.rc.exit() + + async def test_start_client_with_no_member(self): + with self.assertRaises(HazelcastError): + await self.create_client( + { + "cluster_members": [ + "127.0.0.1:5701", + "127.0.0.1:5702", + "127.0.0.1:5703", + ], + "cluster_connect_timeout": 2, + } + ) + + async def test_start_client_before_member(self): + async def run(): + await asyncio.sleep(1.0) + self.cluster.start_member() + + asyncio.create_task(run()) + await self.create_client( + { + "cluster_name": self.cluster.id, + "cluster_connect_timeout": 5.0, + } + ) + + async def test_restart_member(self): + member = self.cluster.start_member() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + "cluster_connect_timeout": 5.0, + } + ) + + state = [None] + + def listener(s): + state[0] = s + + client.lifecycle_service.add_listener(listener) + + member.shutdown() + await self.assertTrueEventually( + lambda: self.assertEqual(state[0], LifecycleState.DISCONNECTED) + ) + self.cluster.start_member() + await self.assertTrueEventually( + lambda: self.assertEqual(state[0], LifecycleState.CONNECTED) + ) + + async def test_listener_re_register(self): + member = self.cluster.start_member() + client = await self.create_client( + { + 
"cluster_name": self.cluster.id, + "cluster_connect_timeout": 5.0, + } + ) + + map = await client.get_map("map") + collector = event_collector() + reg_id = await map.add_entry_listener(added_func=collector) + self.logger.info("Registered listener with id %s", reg_id) + member.shutdown() + self.cluster.start_member() + count = AtomicInteger() + + async def assert_events(): + if client.lifecycle_service.is_running(): + while True: + try: + await map.put("key-%d" % count.get_and_increment(), "value") + break + except TargetDisconnectedError: + pass + self.assertGreater(len(collector.events), 0) + else: + self.fail("Client disconnected...") + + await self.assertTrueEventually(assert_events) + + async def test_member_list_after_reconnect(self): + old_member = self.cluster.start_member() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + "cluster_connect_timeout": 5.0, + } + ) + old_member.shutdown() + new_member = self.cluster.start_member() + + def assert_member_list(): + members = client.cluster_service.get_members() + self.assertEqual(1, len(members)) + self.assertEqual(new_member.uuid, str(members[0].uuid)) + + await self.assertTrueEventually(assert_member_list) + + async def test_reconnect_toNewNode_ViaLastMemberList(self): + old_member = self.cluster.start_member() + client = await self.create_client( + { + "cluster_name": self.cluster.id, + "cluster_members": [ + "127.0.0.1:5701", + ], + "smart_routing": False, + "cluster_connect_timeout": 10.0, + } + ) + new_member = self.cluster.start_member() + old_member.shutdown() + + def assert_member_list(): + members = client.cluster_service.get_members() + self.assertEqual(1, len(members)) + self.assertEqual(new_member.uuid, str(members[0].uuid)) + + await self.assertTrueEventually(assert_member_list) + + +class ReconnectWithDifferentInterfacesTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + def _create_cluster_config(self, public_address, heartbeat_seconds=300): + return """ + + + %s + + + %d + + """ % ( + public_address, + heartbeat_seconds, + ) + + def setUp(self): + self.rc = self.create_rc() + self.client = None + + async def asyncTearDown(self): + if self.client: + # If the test is failed, and we couldn't shutdown + # the client, try to shutdown here to make sure that + # we are not going to affect other tests. If the client + # is already shutdown, then this is basically no-op. 
+ await self.client.shutdown() + + self.rc.exit() + + async def test_connection_count_after_reconnect_with_member_hostname_client_ip(self): + await self._verify_connection_count_after_reconnect("localhost", "127.0.0.1") + + async def test_connection_count_after_reconnect_with_member_hostname_client_hostname(self): + await self._verify_connection_count_after_reconnect("localhost", "localhost") + + async def test_connection_count_after_reconnect_with_member_ip_client_ip(self): + await self._verify_connection_count_after_reconnect("127.0.0.1", "127.0.0.1") + + async def test_connection_count_after_reconnect_with_member_ip_client_hostname(self): + await self._verify_connection_count_after_reconnect("127.0.0.1", "localhost") + + async def test_listeners_after_client_disconnected_with_member_hostname_client_ip(self): + await self._verify_listeners_after_client_disconnected("localhost", "127.0.0.1") + + async def test_listeners_after_client_disconnected_with_member_hostname_client_hostname(self): + await self._verify_listeners_after_client_disconnected("localhost", "localhost") + + async def test_listeners_after_client_disconnected_with_member_ip_client_ip(self): + await self._verify_listeners_after_client_disconnected("127.0.0.1", "127.0.0.1") + + async def test_listeners_after_client_disconnected_with_member_ip_client_hostname(self): + await self._verify_listeners_after_client_disconnected("127.0.0.1", "localhost") + + async def _verify_connection_count_after_reconnect(self, member_address, client_address): + cluster = self.create_cluster(self.rc, self._create_cluster_config(member_address)) + member = cluster.start_member() + + disconnected = asyncio.Event() + reconnected = asyncio.Event() + + def listener(state): + if state == "DISCONNECTED": + disconnected.set() + + if state == "CONNECTED" and disconnected.is_set(): + reconnected.set() + + client = await HazelcastClient.create_and_start( + cluster_name=cluster.id, + cluster_members=[client_address], + cluster_connect_timeout=sys.maxsize, + lifecycle_listeners=[listener], + ) + + self.client = client + await self.assertTrueEventually( + lambda: self.assertEqual(1, len(client._connection_manager.active_connections)) + ) + member.shutdown() + await self.assertTrueEventually(lambda: self.assertTrue(disconnected.is_set())) + cluster.start_member() + await self.assertTrueEventually(lambda: self.assertTrue(reconnected.is_set())) + self.assertEqual(1, len(client._connection_manager.active_connections)) + await client.shutdown() + self.rc.terminateCluster(cluster.id) + + async def _verify_listeners_after_client_disconnected(self, member_address, client_address): + heartbeat_seconds = 2 + cluster = self.create_cluster( + self.rc, self._create_cluster_config(member_address, heartbeat_seconds) + ) + member = cluster.start_member() + client = await HazelcastClient.create_and_start( + cluster_name=cluster.id, + cluster_members=[client_address], + cluster_connect_timeout=sys.maxsize, + ) + self.client = client + test_map = await client.get_map("test") + event_count = AtomicInteger() + await test_map.add_entry_listener( + added_func=lambda _: event_count.get_and_increment(), include_value=False + ) + await self.assertTrueEventually( + lambda: self.assertEqual(1, len(client._connection_manager.active_connections)) + ) + member.shutdown() + await asyncio.sleep(2 * heartbeat_seconds) + cluster.start_member() + + async def assertion(): + await test_map.remove(1) + await test_map.put(1, 2) + self.assertNotEqual(0, event_count.get()) + + await 
self.assertTrueEventually(assertion)
+
+        await client.shutdown()
+        self.rc.terminateCluster(cluster.id)
diff --git a/tests/integration/asyncio/shutdown_test.py b/tests/integration/asyncio/shutdown_test.py
new file mode 100644
index 0000000000..920fbef0c1
--- /dev/null
+++ b/tests/integration/asyncio/shutdown_test.py
@@ -0,0 +1,56 @@
+import asyncio
+import unittest
+
+from hazelcast.errors import HazelcastClientNotActiveError
+from tests.integration.asyncio.base import HazelcastTestCase
+
+
+class ShutdownTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase):
+    rc = None
+
+    def setUp(self):
+        self.rc = self.create_rc()
+        self.cluster = self.create_cluster(self.rc)
+
+    async def asyncTearDown(self):
+        await self.shutdown_all_clients()
+        self.rc.terminateCluster(self.cluster.id)
+        self.rc.exit()
+
+    async def test_shutdown_not_hang_on_member_closed(self):
+        member = self.cluster.start_member()
+        client = await self.create_client(
+            {
+                "cluster_name": self.cluster.id,
+                "cluster_connect_timeout": 5.0,
+            }
+        )
+        my_map = await client.get_map("test")
+        await my_map.put("key", "value")
+        member.shutdown()
+        with self.assertRaises(HazelcastClientNotActiveError):
+            while True:
+                await my_map.get("key")
+
+    async def test_invocations_finalised_when_client_shutdowns(self):
+        self.cluster.start_member()
+        client = await self.create_client(
+            {
+                "cluster_name": self.cluster.id,
+            }
+        )
+        m = await client.get_map("test")
+        await m.put("key", "value")
+
+        async def run():
+            for _ in range(1000):
+                try:
+                    await m.get("key")
+                except Exception:
+                    pass
+
+        async with asyncio.TaskGroup() as tg:
+            for _ in range(10):
+                tg.create_task(run())
+
+        await client.shutdown()
diff --git a/tests/integration/asyncio/smart_listener_test.py b/tests/integration/asyncio/smart_listener_test.py
new file mode 100644
index 0000000000..ade74ccd74
--- /dev/null
+++ b/tests/integration/asyncio/smart_listener_test.py
@@ -0,0 +1,45 @@
+import asyncio
+import unittest
+
+from tests.integration.asyncio.base import HazelcastTestCase
+from tests.util import random_string, event_collector
+
+
+class SmartListenerTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase):
+
+    rc = None
+    cluster = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.rc = cls.create_rc()
+        cls.cluster = cls.create_cluster(cls.rc, None)
+        cls.cluster.start_member()
+        cls.cluster.start_member()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.rc.terminateCluster(cls.cluster.id)
+        cls.rc.exit()
+
+    async def asyncSetUp(self):
+        self.client = await self.create_client(
+            {
+                "cluster_name": self.cluster.id,
+                "smart_routing": True,
+            }
+        )
+        self.collector = event_collector()
+
+    async def asyncTearDown(self):
+        await self.shutdown_all_clients()
+
+    async def test_map_smart_listener_local_only(self):
+        map = await self.client.get_map(random_string())
+        await map.add_entry_listener(added_func=self.collector)
+        await map.put("key", "value")
+        await self.assert_event_received_once()
+
+    async def assert_event_received_once(self):
+        await asyncio.sleep(2)
+        self.assertEqual(1, len(self.collector.events))

From 5334cd1fb629948259d503e4c33a594c1298463f Mon Sep 17 00:00:00 2001
From: Yuce Tekol
Date: Mon, 17 Nov 2025 18:21:06 +0300
Subject: [PATCH 33/51] Added near cache, statistics, statistics tests

---
 hazelcast/asyncio/client.py                  |   4 +-
 hazelcast/internal/asyncio_proxy/map.py      | 172 +++++++-
 hazelcast/internal/asyncio_reactor.py        |   3 +-
 hazelcast/internal/asyncio_statistics.py     | 394 +++++++++++++++++++
 tests/integration/asyncio/statistics_test.py | 266 +++++++++++++
 5 files
changed, 834 insertions(+), 5 deletions(-) create mode 100644 hazelcast/internal/asyncio_statistics.py create mode 100644 tests/integration/asyncio/statistics_test.py diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 08e3a7aeb4..758ae7011f 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -30,7 +30,7 @@ from hazelcast.internal.asyncio_reactor import AsyncioReactor from hazelcast.serialization import SerializationServiceV1 from hazelcast.sql import SqlService, _InternalSqlService -from hazelcast.statistics import Statistics +from hazelcast.internal.asyncio_statistics import Statistics from hazelcast.types import KeyType, ValueType, ItemType, MessageType from hazelcast.util import AtomicInteger, RoundRobinLB @@ -176,7 +176,7 @@ async def _start(self): self._listener_service.start() await self._invocation_service.add_backup_listener() self._load_balancer.init(self._cluster_service) - self._statistics.start() + await self._statistics.start() except Exception: await self.shutdown() raise diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index 9f2f765ec1..b7d5456de7 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -64,7 +64,7 @@ map_put_transient_with_max_idle_codec, map_set_with_max_idle_codec, map_remove_interceptor_codec, - map_remove_all_codec, + map_remove_all_codec, map_add_near_cache_invalidation_listener_codec, ) from hazelcast.internal.asyncio_proxy.base import ( Proxy, @@ -971,8 +971,176 @@ def handler(message): return self._invoke_on_key(request, key_data, handler) +class MapFeatNearCache(Map[KeyType, ValueType]): + """Map proxy implementation featuring Near Cache""" + + def __init__(self, service_name, name, context): + super(MapFeatNearCache, self).__init__(service_name, name, context) + self._invalidation_listener_id = None + self._near_cache = context.near_cache_manager.get_or_create_near_cache(name) + if self._near_cache.invalidate_on_change: + self._add_near_cache_invalidation_listener() + + async def clear(self): + self._near_cache._clear() + return await super(MapFeatNearCache, self).clear() + + async def evict_all(self): + self._near_cache.clear() + return await super(MapFeatNearCache, self).evict_all() + + async def load_all(self, keys=None, replace_existing_values=True): + if keys is None and replace_existing_values: + self._near_cache.clear() + return await super(MapFeatNearCache, self).load_all(keys, replace_existing_values) + + def _on_destroy(self): + self._remove_near_cache_invalidation_listener() + self._near_cache.clear() + super(MapFeatNearCache, self)._on_destroy() + + async def _add_near_cache_invalidation_listener(self): + codec = map_add_near_cache_invalidation_listener_codec + request = codec.encode_request(self.name, EntryEventType.INVALIDATION, self._is_smart) + self._invalidation_listener_id = await self._register_listener( + request, + lambda r: codec.decode_response(r), + lambda reg_id: map_remove_entry_listener_codec.encode_request(self.name, reg_id), + lambda m: codec.handle(m, self._handle_invalidation, self._handle_batch_invalidation), + ) + + async def _remove_near_cache_invalidation_listener(self): + if self._invalidation_listener_id: + await self.remove_entry_listener(self._invalidation_listener_id) + + def _handle_invalidation(self, key, source_uuid, partition_uuid, sequence): + # key is always ``Data`` + # null key means near cache has to remove all entries in it. 
+ # see MapAddNearCacheEntryListenerMessageTask. + if key is None: + self._near_cache._clear() + else: + self._invalidate_cache(key) + + def _handle_batch_invalidation(self, keys, source_uuids, partition_uuids, sequences): + # key_list is always list of ``Data`` + for key_data in keys: + self._invalidate_cache(key_data) + + def _invalidate_cache(self, key_data): + self._near_cache._invalidate(key_data) + + def _invalidate_cache_batch(self, key_data_list): + for key_data in key_data_list: + self._near_cache._invalidate(key_data) + + # internals + async def _contains_key_internal(self, key_data): + try: + return self._near_cache[key_data] + except KeyError: + return await super(MapFeatNearCache, self)._contains_key_internal(key_data) + + async def _get_internal(self, key_data): + try: + return self._near_cache[key_data] + except KeyError: + value = await super(MapFeatNearCache, self)._get_internal(key_data) + self._near_cache.__setitem__(key_data, value) + return value + + async def _get_all_internal(self, partition_to_keys, tasks=None): + tasks = tasks or [] + for key_dic in partition_to_keys.values(): + for key in list(key_dic.keys()): + try: + key_data = key_dic[key] + value = self._near_cache[key_data] + future = asyncio.Future() + future.set_result((key, value)) + tasks.append(future) + del key_dic[key] + except KeyError: + pass + return await super(MapFeatNearCache, self)._get_all_internal(partition_to_keys, tasks) + + def _try_remove_internal(self, key_data, timeout): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._try_remove_internal(key_data, timeout) + + def _try_put_internal(self, key_data, value_data, timeout): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._try_put_internal(key_data, value_data, timeout) + + def _set_internal(self, key_data, value_data, ttl, max_idle): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._set_internal(key_data, value_data, ttl, max_idle) + + def _set_ttl_internal(self, key_data, ttl): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._set_ttl_internal(key_data, ttl) + + def _replace_internal(self, key_data, value_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._replace_internal(key_data, value_data) + + def _replace_if_same_internal(self, key_data, old_value_data, new_value_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._replace_if_same_internal( + key_data, old_value_data, new_value_data + ) + + def _remove_internal(self, key_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._remove_internal(key_data) + + def _remove_all_internal(self, predicate_data): + self._near_cache.clear() + return super(MapFeatNearCache, self)._remove_all_internal(predicate_data) + + def _remove_if_same_internal_(self, key_data, value_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._remove_if_same_internal_(key_data, value_data) + + def _put_transient_internal(self, key_data, value_data, ttl, max_idle): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._put_transient_internal( + key_data, value_data, ttl, max_idle + ) + + def _put_internal(self, key_data, value_data, ttl, max_idle): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._put_internal(key_data, value_data, ttl, max_idle) + + def _put_if_absent_internal(self, key_data, value_data, ttl, max_idle): + self._invalidate_cache(key_data) + 
return super(MapFeatNearCache, self)._put_if_absent_internal( + key_data, value_data, ttl, max_idle + ) + + def _load_all_internal(self, key_data_list, replace_existing_values): + self._invalidate_cache_batch(key_data_list) + return super(MapFeatNearCache, self)._load_all_internal( + key_data_list, replace_existing_values + ) + + def _execute_on_key_internal(self, key_data, entry_processor_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._execute_on_key_internal( + key_data, entry_processor_data + ) + + def _evict_internal(self, key_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._evict_internal(key_data) + + def _delete_internal(self, key_data): + self._invalidate_cache(key_data) + return super(MapFeatNearCache, self)._delete_internal(key_data) + + def create_map_proxy(service_name, name, context): near_cache_config = context.config.near_caches.get(name, None) if near_cache_config is None: return Map(service_name, name, context) - raise InvalidConfigurationError("near cache is not supported") + return MapFeatNearCache(service_name, name, context) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index 37552d9bd2..ecfa1f2a47 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -97,7 +97,8 @@ async def _create_connection(self, config, address): ssl=ssl_context, server_hostname=server_hostname, ) - _sock, self._proto = res + sock, self._proto = res + self.local_address = Address(*sock._sock.getsockname()) def _write(self, buf): self._proto.write(buf) diff --git a/hazelcast/internal/asyncio_statistics.py b/hazelcast/internal/asyncio_statistics.py new file mode 100644 index 0000000000..71377481b7 --- /dev/null +++ b/hazelcast/internal/asyncio_statistics.py @@ -0,0 +1,394 @@ +import asyncio +import logging +import os + +from hazelcast.core import CLIENT_TYPE +from hazelcast.internal.asyncio_invocation import Invocation +from hazelcast.metrics import MetricsCompressor, MetricDescriptor, ValueType, ProbeUnit +from hazelcast.protocol.codec import client_statistics_codec +from hazelcast.util import current_time_in_millis, to_millis, to_nanos, current_time +from hazelcast import __version__ + +try: + # psutil does not support type hints + import psutil # type: ignore[import] + + _PSUTIL_ENABLED = True +except ImportError: + _PSUTIL_ENABLED = False + +_logger = logging.getLogger(__name__) + +_NEAR_CACHE_CATEGORY_PREFIX = "nc." 
+_ATTRIBUTE_SEPARATOR = "," +_KEY_VALUE_SEPARATOR = "=" +_EMPTY_ATTRIBUTE_VALUE = "" + +_NEAR_CACHE_DESCRIPTOR_PREFIX = "nearcache" +_NEAR_CACHE_DESCRIPTOR_DISCRIMINATOR = "name" + +_TCP_METRICS_PREFIX = "tcp" + + +class Statistics: + def __init__( + self, client, config, reactor, connection_manager, invocation_service, near_cache_manager + ): + self._client = client + self._reactor = reactor + self._connection_manager = connection_manager + self._invocation_service = invocation_service + self._near_cache_manager = near_cache_manager + self._enabled = config.statistics_enabled + self._period = config.statistics_period + self._statistics_task = None + self._registered_system_gauges = {} + self._registered_process_gauges = {} + + async def start(self): + if not self._enabled: + return + self._register_gauges() + + async def _statistics_task(): + await asyncio.sleep(self._period) + if not self._client.lifecycle_service.is_running(): + return + try: + await self._collect_and_send_stats() + finally: + self._statistics_task = asyncio.create_task(_statistics_task()) + + self._statistics_task = asyncio.create_task(_statistics_task()) + _logger.info("Client statistics enabled with the period of %s seconds.", self._period) + + def shutdown(self): + if self._statistics_task: + self._statistics_task.cancel() + + def _register_gauges(self): + if not _PSUTIL_ENABLED: + _logger.warning( + "Statistics collection is enabled, but psutil is not found. " + "Runtime and system related metrics will not be collected." + ) + return + + self._register_system_gauge( + "os.totalPhysicalMemorySize", + lambda: psutil.virtual_memory().total, + ) + self._register_system_gauge( + "os.freePhysicalMemorySize", + lambda: psutil.virtual_memory().free, + ) + self._register_system_gauge( + "os.committedVirtualMemorySize", + lambda: psutil.virtual_memory().used, + ) + self._register_system_gauge( + "os.totalSwapSpaceSize", + lambda: psutil.swap_memory().total, + ) + self._register_system_gauge( + "os.freeSwapSpaceSize", + lambda: psutil.swap_memory().free, + ) + self._register_system_gauge( + "os.systemLoadAverage", + lambda: os.getloadavg()[0], + ValueType.DOUBLE, + ) + self._register_system_gauge( + "runtime.availableProcessors", + lambda: psutil.cpu_count(), + ) + + self._register_process_gauge( + "runtime.usedMemory", + lambda p: p.memory_info().rss, + ) + self._register_process_gauge( + "os.openFileDescriptorCount", + lambda p: p.num_fds(), + ) + self._register_process_gauge( + "os.maxFileDescriptorCount", + lambda p: p.rlimit(psutil.RLIMIT_NOFILE)[1], + ) + self._register_process_gauge( + "os.processCpuTime", + lambda p: to_nanos(sum(p.cpu_times())), + ) + self._register_process_gauge( + "runtime.uptime", + lambda p: to_millis(current_time() - p.create_time()), + ) + + def _register_system_gauge(self, gauge_name, gauge_fn, value_type=ValueType.LONG): + # Try a gauge function read, we will register it if it succeeds. + try: + gauge_fn() + self._registered_system_gauges[gauge_name] = (gauge_fn, value_type) + except Exception as e: + _logger.debug( + "Unable to register the system related gauge %s. Error: %s", gauge_name, e + ) + + def _register_process_gauge(self, gauge_name, gauge_fn, value_type=ValueType.LONG): + # Try a gauge function read, we will register it if it succeeds. + try: + process = psutil.Process() + gauge_fn(process) + self._registered_process_gauges[gauge_name] = (gauge_fn, value_type) + except Exception as e: + _logger.debug( + "Unable to register the process related gauge %s. 
Error: %s", gauge_name, e + ) + + async def _collect_and_send_stats(self): + connection = self._connection_manager.get_random_connection() + if not connection: + _logger.debug("Cannot send client statistics to the server. No connection found.") + return + + collection_timestamp = current_time_in_millis() + attributes = [] + compressor = MetricsCompressor() + + self._add_client_attributes(attributes, connection) + self._add_near_cache_metrics(attributes, compressor) + self._add_system_and_process_metrics(attributes, compressor) + self._add_tcp_metrics(compressor) + await self._send_stats( + collection_timestamp, "".join(attributes), compressor.generate_blob(), connection + ) + + async def _send_stats(self, collection_timestamp, attributes, metrics_blob, connection): + request = client_statistics_codec.encode_request( + collection_timestamp, attributes, metrics_blob + ) + invocation = Invocation(request, connection=connection) + await self._invocation_service.ainvoke(invocation) + + def _add_system_and_process_metrics(self, attributes, compressor): + if not _PSUTIL_ENABLED: + # Nothing to do if psutil is not found + return + + for gauge_name, (gauge_fn, value_type) in self._registered_system_gauges.items(): + try: + value = gauge_fn() + self._add_system_or_process_metric( + attributes, compressor, gauge_name, value, value_type + ) + except: + _logger.exception("Error while collecting '%s'.", gauge_name) + + if not self._registered_process_gauges: + # Do not create the process object if no process-related + # metric is registered. + return + + process = psutil.Process() + for gauge_name, (gauge_fn, value_type) in self._registered_process_gauges.items(): + try: + value = gauge_fn(process) + self._add_system_or_process_metric( + attributes, compressor, gauge_name, value, value_type + ) + except: + _logger.exception("Error while collecting '%s'.", gauge_name) + + def _add_system_or_process_metric(self, attributes, compressor, gauge_name, value, value_type): + # We don't have any metrics that do not have prefix. + # Necessary care must be taken when we will send simple + # named metrics. 
+ prefix, metric_name = gauge_name.rsplit(".", 1) + descriptor = MetricDescriptor(metric=metric_name, prefix=prefix) + self._add_metric(compressor, descriptor, value, value_type) + self._add_attribute(attributes, gauge_name, value) + + def _add_client_attributes(self, attributes, connection): + self._add_attribute(attributes, "lastStatisticsCollectionTime", current_time_in_millis()) + self._add_attribute(attributes, "enterprise", "false") + self._add_attribute(attributes, "clientType", CLIENT_TYPE) + self._add_attribute(attributes, "clientVersion", __version__) + self._add_attribute( + attributes, "clusterConnectionTimestamp", to_millis(connection.start_time) + ) + + local_address = connection.local_address + local_address = str(local_address.host) + ":" + str(local_address.port) + self._add_attribute(attributes, "clientAddress", local_address) + self._add_attribute(attributes, "clientName", self._client.name) + + def _add_near_cache_metrics(self, attributes, compressor): + for near_cache in self._near_cache_manager.list_near_caches(): + nc_name = near_cache.name + nc_name_with_prefix = self._get_name_with_prefix(nc_name) + nc_name_with_prefix.append(".") + nc_name_with_prefix = "".join(nc_name_with_prefix) + + near_cache_stats = near_cache.get_statistics() + self._add_near_cache_metric( + attributes, + compressor, + "creationTime", + to_millis(near_cache_stats["creation_time"]), + ValueType.LONG, + ProbeUnit.MS, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "evictions", + near_cache_stats["evictions"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "hits", + near_cache_stats["hits"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "misses", + near_cache_stats["misses"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "ownedEntryCount", + near_cache_stats["owned_entry_count"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "expirations", + near_cache_stats["expirations"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "invalidations", + near_cache_stats["invalidations"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "invalidationRequests", + near_cache_stats["invalidation_requests"], + ValueType.LONG, + ProbeUnit.COUNT, + nc_name, + nc_name_with_prefix, + ) + + self._add_near_cache_metric( + attributes, + compressor, + "ownedEntryMemoryCost", + near_cache_stats["owned_entry_memory_cost"], + ValueType.LONG, + ProbeUnit.BYTES, + nc_name, + nc_name_with_prefix, + ) + + def _add_near_cache_metric( + self, attributes, compressor, metric, value, value_type, unit, nc_name, nc_name_with_prefix + ): + descriptor = MetricDescriptor( + metric=metric, + prefix=_NEAR_CACHE_DESCRIPTOR_PREFIX, + discriminator=_NEAR_CACHE_DESCRIPTOR_DISCRIMINATOR, + discriminator_value=nc_name, + unit=unit, + ) + try: + self._add_metric(compressor, descriptor, value, value_type) + self._add_attribute(attributes, metric, value, nc_name_with_prefix) + except: + _logger.exception( + "Error while collecting %s metric for near cache 
'%s'.", metric, nc_name + ) + + def _add_tcp_metrics(self, compressor): + self._add_tcp_metric(compressor, "bytesSend", self._reactor._bytes_sent) + self._add_tcp_metric(compressor, "bytesReceived", self._reactor._bytes_received) + + def _add_tcp_metric( + self, compressor, metric, value, value_type=ValueType.LONG, unit=ProbeUnit.BYTES + ): + descriptor = MetricDescriptor( + metric=metric, + prefix=_TCP_METRICS_PREFIX, + unit=unit, + ) + try: + self._add_metric(compressor, descriptor, value, value_type) + except: + _logger.exception("Error while collecting '%s.%s'.", _TCP_METRICS_PREFIX, metric) + + def _add_metric(self, compressor, descriptor, value, value_type): + if value_type == ValueType.LONG: + compressor.add_long(descriptor, value) + elif value_type == ValueType.DOUBLE: + compressor.add_double(descriptor, value) + else: + raise ValueError("Unexpected type: " + value_type) + + def _add_attribute(self, attributes, name, value, key_prefix=None): + if len(attributes) != 0: + attributes.append(_ATTRIBUTE_SEPARATOR) + + if key_prefix: + attributes.append(key_prefix) + + attributes.append(name) + attributes.append(_KEY_VALUE_SEPARATOR) + attributes.append(str(value)) + + def _get_name_with_prefix(self, name): + return [_NEAR_CACHE_CATEGORY_PREFIX, self._escape_special_characters(name)] + + def _escape_special_characters(self, name): + escaped_name = ( + name.replace("\\", "\\\\").replace(",", "\\,").replace(".", "\\.").replace("=", "\\=") + ) + return escaped_name[1:] if name[0] == "/" else escaped_name diff --git a/tests/integration/asyncio/statistics_test.py b/tests/integration/asyncio/statistics_test.py new file mode 100644 index 0000000000..01b22abd0f --- /dev/null +++ b/tests/integration/asyncio/statistics_test.py @@ -0,0 +1,266 @@ +import asyncio +import itertools +import unittest +import zlib + +from hazelcast import __version__ +from hazelcast.asyncio import HazelcastClient +from hazelcast.core import CLIENT_TYPE +from hazelcast.serialization import BE_INT, INT_SIZE_IN_BYTES +from hazelcast.statistics import Statistics +from tests.integration.asyncio.base import HazelcastTestCase +from tests.hzrc.ttypes import Lang +from tests.util import get_current_timestamp, random_string, skip_if_client_version_older_than + + +class StatisticsTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + DEFAULT_STATS_PERIOD = 3 + STATS_PERIOD = 1 + + @classmethod + def setUpClass(cls): + cls.rc = cls.create_rc() + cls.cluster = cls.create_cluster(cls.rc) + cls.member = cls.cluster.start_member() + + @classmethod + def tearDownClass(cls): + cls.rc.exit() + + async def test_statistics_disabled_by_default(self): + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, cluster_connect_timeout=30.0 + ) + await asyncio.sleep(2 * self.DEFAULT_STATS_PERIOD) + client_uuid = client._connection_manager.client_uuid + response = self.get_client_stats_from_server(client_uuid) + self.assertTrue(response.success) + self.assertIsNone(response.result) + await client.shutdown() + + async def test_statistics_enabled(self): + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, cluster_connect_timeout=30.0, statistics_enabled=True + ) + client_uuid = client._connection_manager.client_uuid + await asyncio.sleep(2 * self.DEFAULT_STATS_PERIOD) + await self.wait_for_statistics_collection(client_uuid) + await client.shutdown() + + async def test_statistics_period(self): + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + 
cluster_connect_timeout=30.0, + statistics_enabled=True, + statistics_period=self.STATS_PERIOD, + ) + client_uuid = client._connection_manager.client_uuid + await asyncio.sleep(2 * self.STATS_PERIOD) + response1 = await self.wait_for_statistics_collection(client_uuid) + await asyncio.sleep(2 * self.STATS_PERIOD) + response2 = await self.wait_for_statistics_collection(client_uuid) + self.assertNotEqual(response1, response2) + await client.shutdown() + + async def test_statistics_content(self): + map_name = random_string() + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + cluster_connect_timeout=30.0, + statistics_enabled=True, + statistics_period=self.STATS_PERIOD, + near_caches={ + map_name: {}, + }, + ) + client_uuid = client._connection_manager.client_uuid + await client.get_map(map_name) + await asyncio.sleep(2 * self.STATS_PERIOD) + response = await self.wait_for_statistics_collection(client_uuid) + result = response.result.decode("utf-8") + info = client._internal_cluster_service.get_local_client() + local_address = "%s:%s" % (info.address.host, info.address.port) + # Check near cache and client statistics + self.assertEqual(1, result.count("clientName=" + client.name)) + self.assertEqual(1, result.count("lastStatisticsCollectionTime=")) + self.assertEqual(1, result.count("enterprise=false")) + self.assertEqual(1, result.count("clientType=" + CLIENT_TYPE)) + self.assertEqual(1, result.count("clientVersion=" + __version__)) + self.assertEqual(1, result.count("clusterConnectionTimestamp=")) + self.assertEqual(1, result.count("clientAddress=" + local_address)) + self.assertEqual(1, result.count("nc." + map_name + ".creationTime")) + self.assertEqual(1, result.count("nc." + map_name + ".evictions")) + self.assertEqual(1, result.count("nc." + map_name + ".hits")) + self.assertEqual(1, result.count("nc." + map_name + ".misses")) + self.assertEqual(1, result.count("nc." + map_name + ".ownedEntryCount")) + self.assertEqual(1, result.count("nc." + map_name + ".expirations")) + self.assertEqual(1, result.count("nc." + map_name + ".invalidations")) + self.assertEqual(1, result.count("nc." + map_name + ".invalidationRequests")) + self.assertEqual(1, result.count("nc." + map_name + ".ownedEntryMemoryCost")) + # Check OS and runtime statistics. We cannot know what kind of statistics will be available + # in different platforms. 
So, first try to get these statistics and then check the + # response content + for stat_name in self.get_runtime_and_system_metrics(client): + self.assertEqual(1, result.count(stat_name)) + + await client.shutdown() + + async def test_special_characters(self): + map_name = random_string() + ",t=es\\t" + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + cluster_connect_timeout=30.0, + statistics_enabled=True, + statistics_period=self.STATS_PERIOD, + near_caches={ + map_name: {}, + }, + ) + client_uuid = client._connection_manager.client_uuid + await client.get_map(map_name) + await asyncio.sleep(2 * self.STATS_PERIOD) + response = await self.wait_for_statistics_collection(client_uuid) + result = response.result.decode("utf-8") + unescaped_result = self.unescape_special_chars(result) + self.assertEqual(-1, result.find(map_name)) + self.assertNotEqual(-1, unescaped_result.find(map_name)) + await client.shutdown() + + async def test_near_cache_stats(self): + map_name = random_string() + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + cluster_connect_timeout=30.0, + statistics_enabled=True, + statistics_period=self.STATS_PERIOD, + near_caches={ + map_name: {}, + }, + ) + client_uuid = client._connection_manager.client_uuid + test_map = await client.get_map(map_name) + await asyncio.sleep(2 * self.STATS_PERIOD) + response = await self.wait_for_statistics_collection(client_uuid) + result = response.result.decode("utf-8") + self.assertEqual(1, result.count("nc." + map_name + ".evictions=0")) + self.assertEqual(1, result.count("nc." + map_name + ".hits=0")) + self.assertEqual(1, result.count("nc." + map_name + ".misses=0")) + self.assertEqual(1, result.count("nc." + map_name + ".ownedEntryCount=0")) + self.assertEqual(1, result.count("nc." + map_name + ".expirations=0")) + self.assertEqual(1, result.count("nc." + map_name + ".invalidations=0")) + self.assertEqual(1, result.count("nc." + map_name + ".invalidationRequests=0")) + await test_map.put(1, 2) # invalidation request + await test_map.get(1) # cache miss + await test_map.get(1) # cache hit + await test_map.put(1, 3) # invalidation + invalidation request + await test_map.get(1) # cache miss + await asyncio.sleep(2 * self.STATS_PERIOD) + response = await self.wait_for_statistics_collection(client_uuid) + result = response.result.decode("utf-8") + self.assertEqual(1, result.count("nc." + map_name + ".evictions=0")) + self.assertEqual(1, result.count("nc." + map_name + ".hits=1")) + self.assertEqual(1, result.count("nc." + map_name + ".misses=2")) + self.assertEqual(1, result.count("nc." + map_name + ".ownedEntryCount=1")) + self.assertEqual(1, result.count("nc." + map_name + ".expirations=0")) + self.assertEqual(1, result.count("nc." + map_name + ".invalidations=1")) + self.assertEqual(1, result.count("nc." 
+ map_name + ".invalidationRequests=2")) + await client.shutdown() + + async def test_metrics_blob(self): + skip_if_client_version_older_than(self, "4.2.1") + map_name = random_string() + client = await HazelcastClient.create_and_start( + cluster_name=self.cluster.id, + cluster_connect_timeout=30.0, + statistics_enabled=True, + statistics_period=self.STATS_PERIOD, + near_caches={ + map_name: {}, + }, + ) + client_uuid = client._connection_manager.client_uuid + await client.get_map(map_name) + await asyncio.sleep(2 * self.STATS_PERIOD) + response = await self.wait_for_statistics_collection(client_uuid, get_metric_blob=True) + result = bytearray(response.result) + # We will try to decompress the blob according to its contract + # to verify we have sent something that make sense + pos = 2 # Skip the version + dict_buf_size = BE_INT.unpack_from(result, pos)[0] + pos += INT_SIZE_IN_BYTES + dict_buf = result[pos : pos + dict_buf_size] + self.assertTrue(len(dict_buf) > 0) + pos += dict_buf_size + pos += INT_SIZE_IN_BYTES # Skip metric count + metrics_buf = result[pos:] + self.assertTrue(len(metrics_buf) > 0) + # If we are able to decompress it, we count the blob + # as valid. + zlib.decompress(dict_buf) + zlib.decompress(metrics_buf) + await client.shutdown() + + def get_metrics_blob(self, client_uuid): + script = ( + """ + stats = instance_0.getOriginal().node.getClientEngine().getClientStatistics(); + keys = stats.keySet().toArray(); + for(i=0; i < keys.length; i++) { + if (keys[i].toString().equals("%s")) { + result = stats.get(keys[i]).metricsBlob(); + break; + } + }""" + % client_uuid + ) + + return self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT) + + def get_client_stats_from_server(self, client_uuid): + script = ( + """ + stats = instance_0.getOriginal().node.getClientEngine().getClientStatistics(); + keys = stats.keySet().toArray(); + for(i=0; i < keys.length; i++) { + if (keys[i].toString().equals("%s")) { + result = stats.get(keys[i]).clientAttributes(); + break; + } + }""" + % client_uuid + ) + + return self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT) + + def unescape_special_chars(self, value): + return ( + value.replace("\\,", ",").replace("\\=", "=").replace("\\.", ".").replace("\\\\", "\\") + ) + + def verify_response_not_empty(self, response): + if not response.success or response.result is None: + raise AssertionError + + async def wait_for_statistics_collection(self, client_uuid, timeout=30, get_metric_blob=False): + timeout_time = get_current_timestamp() + timeout + while get_current_timestamp() < timeout_time: + if get_metric_blob: + response = self.get_metrics_blob(client_uuid) + else: + response = self.get_client_stats_from_server(client_uuid) + + try: + self.verify_response_not_empty(response) + return response + except AssertionError: + await asyncio.sleep(0.1) + + raise AssertionError + + def get_runtime_and_system_metrics(self, client): + s = Statistics(client, client._config, None, None, None, None) + try: + # Compatibility for <4.2.1 clients + return s._get_os_and_runtime_stats() + except: + return itertools.chain(s._registered_system_gauges, s._registered_process_gauges) From a14290a4ec120a23ebdcc05bd3f20bf7c2e82f2a Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 17 Nov 2025 18:22:27 +0300 Subject: [PATCH 34/51] Black --- hazelcast/internal/asyncio_proxy/map.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index 
b7d5456de7..a8495044d9 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -64,7 +64,8 @@ map_put_transient_with_max_idle_codec, map_set_with_max_idle_codec, map_remove_interceptor_codec, - map_remove_all_codec, map_add_near_cache_invalidation_listener_codec, + map_remove_all_codec, + map_add_near_cache_invalidation_listener_codec, ) from hazelcast.internal.asyncio_proxy.base import ( Proxy, From 492ccc122d317690d5c3d61b5d4e03ed5ece1396 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 18 Nov 2025 10:45:05 +0300 Subject: [PATCH 35/51] Fixed getting local address --- hazelcast/internal/asyncio_reactor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index ecfa1f2a47..8c6c4a0fd7 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -98,7 +98,9 @@ async def _create_connection(self, config, address): server_hostname=server_hostname, ) sock, self._proto = res - self.local_address = Address(*sock._sock.getsockname()) + sockname = sock._sock.getsockname() + host, port = sockname[0], sockname[1] + self.local_address = Address(host, port) def _write(self, buf): self._proto.write(buf) From e8a260008ae3da8975375fd1973fa45f2eb1699c Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 18 Nov 2025 10:57:06 +0300 Subject: [PATCH 36/51] Fixed getting local address, take 2 --- hazelcast/internal/asyncio_reactor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hazelcast/internal/asyncio_reactor.py b/hazelcast/internal/asyncio_reactor.py index 8c6c4a0fd7..8565bc0cf3 100644 --- a/hazelcast/internal/asyncio_reactor.py +++ b/hazelcast/internal/asyncio_reactor.py @@ -98,7 +98,11 @@ async def _create_connection(self, config, address): server_hostname=server_hostname, ) sock, self._proto = res - sockname = sock._sock.getsockname() + if hasattr(sock, "_ssl_protocol"): + sock = sock._ssl_protocol._transport._sock + else: + sock = sock._sock + sockname = sock.getsockname() host, port = sockname[0], sockname[1] self.local_address = Address(host, port) From 24eb6bfd6ca6045ea3d9a7ce807d3a0c5093c9bd Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 18 Nov 2025 18:04:05 +0300 Subject: [PATCH 37/51] Added nearcache tests --- hazelcast/internal/asyncio_listener.py | 36 ++-- hazelcast/internal/asyncio_proxy/manager.py | 7 +- hazelcast/internal/asyncio_proxy/map.py | 7 +- .../asyncio/proxy/map_nearcache_test.py | 195 ++++++++++++++++++ 4 files changed, 219 insertions(+), 26 deletions(-) create mode 100644 tests/integration/asyncio/proxy/map_nearcache_test.py diff --git a/hazelcast/internal/asyncio_listener.py b/hazelcast/internal/asyncio_listener.py index dbd04956ea..43642761a6 100644 --- a/hazelcast/internal/asyncio_listener.py +++ b/hazelcast/internal/asyncio_listener.py @@ -175,10 +175,8 @@ async def _register_on_connection( self, user_registration_id, listener_registration, connection ): registration_map = listener_registration.connection_registrations - if connection in registration_map: return - registration_request = listener_registration.registration_request.copy() invocation = Invocation( registration_request, @@ -187,26 +185,20 @@ async def _register_on_connection( response_handler=lambda m: m, urgent=True, ) - self._invocation_service.invoke(invocation) - - def callback(f): - try: - response = f.result() - server_registration_id = listener_registration.decode_register_response(response) - 
correlation_id = registration_request.get_correlation_id() - registration = _EventRegistration(server_registration_id, correlation_id) - registration_map[connection] = registration - except Exception as e: - if connection.live: - _logger.exception( - "Listener %s can not be added to a new connection: %s", - user_registration_id, - connection, - ) - raise e - - invocation.future.add_done_callback(callback) - return await invocation.future + response = await self._invocation_service.ainvoke(invocation) + try: + server_registration_id = listener_registration.decode_register_response(response) + correlation_id = registration_request.get_correlation_id() + registration = _EventRegistration(server_registration_id, correlation_id) + registration_map[connection] = registration + except Exception as e: + if connection.live: + _logger.exception( + "Listener %s can not be added to a new connection: %s", + user_registration_id, + connection, + ) + raise e async def _connection_added(self, connection): async with self._registration_lock: diff --git a/hazelcast/internal/asyncio_proxy/manager.py b/hazelcast/internal/asyncio_proxy/manager.py index 6bf635bcfc..a5028addca 100644 --- a/hazelcast/internal/asyncio_proxy/manager.py +++ b/hazelcast/internal/asyncio_proxy/manager.py @@ -8,7 +8,10 @@ MAP_SERVICE = "hz:impl:mapService" -_proxy_init: typing.Dict[str, typing.Callable[[str, str, typing.Any], Proxy]] = { +_proxy_init: typing.Dict[ + str, + typing.Callable[[str, str, typing.Any], typing.Coroutine[typing.Any, typing.Any, typing.Any]], +] = { MAP_SERVICE: create_map_proxy, } @@ -34,7 +37,7 @@ async def _create_proxy(self, service_name, name, create_on_remote) -> Proxy: invocation_service = self._context.invocation_service await invocation_service.ainvoke(invocation) - return _proxy_init[service_name](service_name, name, self._context) + return await _proxy_init[service_name](service_name, name, self._context) async def destroy_proxy(self, service_name, name, destroy_on_remote=True): ns = (service_name, name) diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index a8495044d9..5fd2ffc475 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -1140,8 +1140,11 @@ def _delete_internal(self, key_data): return super(MapFeatNearCache, self)._delete_internal(key_data) -def create_map_proxy(service_name, name, context): +async def create_map_proxy(service_name, name, context): near_cache_config = context.config.near_caches.get(name, None) if near_cache_config is None: return Map(service_name, name, context) - return MapFeatNearCache(service_name, name, context) + nc = MapFeatNearCache(service_name, name, context) + if nc._near_cache.invalidate_on_change: + await nc._add_near_cache_invalidation_listener() + return nc diff --git a/tests/integration/asyncio/proxy/map_nearcache_test.py b/tests/integration/asyncio/proxy/map_nearcache_test.py new file mode 100644 index 0000000000..fcb3658905 --- /dev/null +++ b/tests/integration/asyncio/proxy/map_nearcache_test.py @@ -0,0 +1,195 @@ +import asyncio +import os + +from hazelcast.config import ReconnectMode +from hazelcast.errors import ClientOfflineError +from hazelcast.lifecycle import LifecycleState +from hazelcast.predicate import true +from tests.hzrc.ttypes import Lang + +from tests.integration.asyncio.base import SingleMemberTestCase, HazelcastTestCase +from tests.util import random_string, skip_if_client_version_older_than +from hazelcast.asyncio import HazelcastClient + + 
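+# The tests below exercise the asyncio near-cache wrapper around the Map proxy:
+# hit/miss counters after put/get, and eviction of cached entries when the map
+# is mutated behind the client's back (single-key remove, non-existent keys,
+# and map.clear() executed on the member).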
+class MapTest(SingleMemberTestCase): + @classmethod + def configure_cluster(cls): + path = os.path.abspath(__file__) + dir_path = os.path.dirname(path) + with open(os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast.xml")) as f: + return f.read() + + @classmethod + def configure_client(cls, config): + cls.map_name = random_string() + config["cluster_name"] = cls.cluster.id + config["near_caches"] = {cls.map_name: {}} + return config + + async def asyncSetUp(self): + await super().asyncSetUp() + self.map = await self.client.get_map(self.map_name) + + async def asyncTearDown(self): + await self.map.destroy() + await super().asyncTearDown() + + async def test_put_get(self): + key = "key" + value = "value" + await self.map.put(key, value) + value2 = await self.map.get(key) + value3 = await self.map.get(key) + self.assertEqual(value, value2) + self.assertEqual(value, value3) + self.assertEqual(1, self.map._near_cache._hits) + self.assertEqual(1, self.map._near_cache._misses) + + async def test_put_get_remove(self): + key = "key" + value = "value" + await self.map.put(key, value) + value2 = await self.map.get(key) + value3 = await self.map.get(key) + await self.map.remove(key) + self.assertEqual(value, value2) + self.assertEqual(value, value3) + self.assertEqual(1, self.map._near_cache._hits) + self.assertEqual(1, self.map._near_cache._misses) + self.assertEqual(0, len(self.map._near_cache)) + + async def test_remove_all(self): + skip_if_client_version_older_than(self, "5.6.0") + + await self.fill_map_and_near_cache(10) + await self.map.remove_all(predicate=true()) + self.assertEqual(0, len(self.map._near_cache)) + + async def test_invalidate_single_key(self): + await self.fill_map_and_near_cache(10) + initial_cache_size = len(self.map._near_cache) + script = """map = instance_0.getMap("{}");map.remove("key-5")""".format(self.map.name) + response = await asyncio.to_thread( + self.rc.executeOnController, self.cluster.id, script, Lang.PYTHON + ) + self.assertTrue(response.success) + self.assertEqual(initial_cache_size, 10) + + def assertion(): + self.assertEqual(9, len(self.map._near_cache)) + + await self.assertTrueEventually(assertion, timeout=30) + + async def test_invalidate_nonexist_key(self): + await self.fill_map_and_near_cache(10) + initial_cache_size = len(self.map._near_cache) + script = ( + """ + var map = instance_0.getMap("%s"); + map.put("key-99","x"); + map.put("key-NonExist","x"); + map.remove("key-NonExist");""" + % self.map.name + ) + + response = self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT) + self.assertTrue(response.success) + self.assertEqual(initial_cache_size, 10) + + async def assertion(): + self.assertEqual(await self.map.size(), 11) + self.assertEqual(len(self.map._near_cache), 10) + + await self.assertTrueEventually(assertion) + + async def test_invalidate_multiple_keys(self): + await self.fill_map_and_near_cache(10) + initial_cache_size = len(self.map._near_cache) + script = """map = instance_0.getMap("{}");map.clear()""".format(self.map.name) + response = await asyncio.to_thread( + self.rc.executeOnController, self.cluster.id, script, Lang.PYTHON + ) + self.assertTrue(response.success) + self.assertEqual(initial_cache_size, 10) + + def assertion(): + self.assertEqual(0, len(self.map._near_cache)) + + await self.assertTrueEventually(assertion, timeout=60) + + async def fill_map_and_near_cache(self, count=10): + fill_content = {"key-%d" % x: "value-%d" % x for x in range(0, count)} + for k, v in fill_content.items(): + await 
self.map.put(k, v) + for k, v in fill_content.items(): + await self.map.get(k) + return fill_content + + +ENTRY_COUNT = 100 + + +class NonStopNearCacheTest(HazelcastTestCase): + def setUp(self): + rc = self.create_rc() + cluster = self.create_cluster(rc, self.read_cluster_config()) + cluster.start_member() + + def event_collector(): + events = [] + + def collector(e): + if e == LifecycleState.DISCONNECTED: + events.append(e) + + collector.events = events + return collector + + collector = event_collector() + + client = HazelcastClient( + cluster_name=cluster.id, + reconnect_mode=ReconnectMode.ASYNC, + near_caches={"map": {}}, + lifecycle_listeners=[collector], + ) + + map = client.get_map("map").blocking() + + for i in range(ENTRY_COUNT): + map.put(i, i) + + # Populate the near cache + for i in range(ENTRY_COUNT): + map.get(i) + + rc.terminateCluster(cluster.id) + rc.exit() + + self.assertTrueEventually(lambda: self.assertEqual(1, len(collector.events))) + + self.client = client + self.map = map + + def tearDown(self): + self.client.shutdown() + + def test_get_existing_key_from_cache_when_the_cluster_is_down(self): + for i in range(ENTRY_COUNT): + self.assertEqual(i, self.map.get(i)) + + def test_get_non_existing_key_from_cache_when_the_cluster_is_down(self): + with self.assertRaises(ClientOfflineError): + self.map.get(ENTRY_COUNT) + + def test_put_to_map_when_the_cluster_is_down(self): + with self.assertRaises(ClientOfflineError): + self.map.put(ENTRY_COUNT, ENTRY_COUNT) + + @staticmethod + def read_cluster_config(): + path = os.path.abspath(__file__) + dir_path = os.path.dirname(path) + with open(os.path.join(dir_path, "hazelcast.xml")) as f: + return f.read() From 6ab936546847143b0566b19a41fe1c4deae34ee6 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 18 Nov 2025 18:23:53 +0300 Subject: [PATCH 38/51] Ported missing nearcache test --- .../asyncio/proxy/map_nearcache_test.py | 36 +++++++++---------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/tests/integration/asyncio/proxy/map_nearcache_test.py b/tests/integration/asyncio/proxy/map_nearcache_test.py index fcb3658905..f0bee878eb 100644 --- a/tests/integration/asyncio/proxy/map_nearcache_test.py +++ b/tests/integration/asyncio/proxy/map_nearcache_test.py @@ -1,5 +1,6 @@ import asyncio import os +import unittest from hazelcast.config import ReconnectMode from hazelcast.errors import ClientOfflineError @@ -130,8 +131,8 @@ async def fill_map_and_near_cache(self, count=10): ENTRY_COUNT = 100 -class NonStopNearCacheTest(HazelcastTestCase): - def setUp(self): +class NonStopNearCacheTest(unittest.IsolatedAsyncioTestCase, HazelcastTestCase): + async def asyncSetUp(self): rc = self.create_rc() cluster = self.create_cluster(rc, self.read_cluster_config()) cluster.start_member() @@ -148,48 +149,45 @@ def collector(e): collector = event_collector() - client = HazelcastClient( + client = await HazelcastClient.create_and_start( cluster_name=cluster.id, reconnect_mode=ReconnectMode.ASYNC, near_caches={"map": {}}, lifecycle_listeners=[collector], ) - map = client.get_map("map").blocking() - + map = await client.get_map("map") for i in range(ENTRY_COUNT): - map.put(i, i) + await map.put(i, i) # Populate the near cache for i in range(ENTRY_COUNT): - map.get(i) + await map.get(i) rc.terminateCluster(cluster.id) rc.exit() - - self.assertTrueEventually(lambda: self.assertEqual(1, len(collector.events))) - + await self.assertTrueEventually(lambda: self.assertEqual(1, len(collector.events))) self.client = client self.map = map - def 
tearDown(self): - self.client.shutdown() + async def tearDown(self): + await self.client.shutdown() - def test_get_existing_key_from_cache_when_the_cluster_is_down(self): + async def test_get_existing_key_from_cache_when_the_cluster_is_down(self): for i in range(ENTRY_COUNT): - self.assertEqual(i, self.map.get(i)) + self.assertEqual(i, await self.map.get(i)) - def test_get_non_existing_key_from_cache_when_the_cluster_is_down(self): + async def test_get_non_existing_key_from_cache_when_the_cluster_is_down(self): with self.assertRaises(ClientOfflineError): - self.map.get(ENTRY_COUNT) + await self.map.get(ENTRY_COUNT) - def test_put_to_map_when_the_cluster_is_down(self): + async def test_put_to_map_when_the_cluster_is_down(self): with self.assertRaises(ClientOfflineError): - self.map.put(ENTRY_COUNT, ENTRY_COUNT) + await self.map.put(ENTRY_COUNT, ENTRY_COUNT) @staticmethod def read_cluster_config(): path = os.path.abspath(__file__) dir_path = os.path.dirname(path) - with open(os.path.join(dir_path, "hazelcast.xml")) as f: + with open(os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast.xml")) as f: return f.read() From 3f3a9c57399667c71216a242eb6bce65c6a09e1e Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 19 Nov 2025 12:41:14 +0300 Subject: [PATCH 39/51] Ported VectorCollection and its tests --- hazelcast/asyncio/client.py | 38 +- hazelcast/internal/asyncio_proxy/manager.py | 3 + hazelcast/internal/asyncio_proxy/map.py | 1 - .../asyncio_proxy/vector_collection.py | 256 +++++++++++++ .../asyncio/proxy/vector_collection_test.py | 337 ++++++++++++++++++ 5 files changed, 631 insertions(+), 4 deletions(-) create mode 100644 hazelcast/internal/asyncio_proxy/vector_collection.py create mode 100644 tests/integration/asyncio/proxy/vector_collection_test.py diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 758ae7011f..1f8c5e9bb8 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -5,13 +5,14 @@ from hazelcast.internal.asyncio_cluster import ClusterService, _InternalClusterService from hazelcast.internal.asyncio_compact import CompactSchemaService -from hazelcast.config import Config +from hazelcast.config import Config, IndexConfig from hazelcast.internal.asyncio_connection import ConnectionManager, DefaultAddressProvider from hazelcast.core import DistributedObjectEvent, DistributedObjectInfo from hazelcast.cp import CPSubsystem, ProxySessionManager from hazelcast.discovery import HazelcastCloudAddressProvider from hazelcast.errors import IllegalStateError, InvalidConfigurationError from hazelcast.internal.asyncio_invocation import InvocationService, Invocation +from hazelcast.internal.asyncio_proxy.vector_collection import VectorCollection from hazelcast.lifecycle import LifecycleService, LifecycleState, _InternalLifecycleService from hazelcast.internal.asyncio_listener import ClusterViewListenerService, ListenerService from hazelcast.near_cache import NearCacheManager @@ -19,11 +20,11 @@ from hazelcast.protocol.codec import ( client_add_distributed_object_listener_codec, client_get_distributed_objects_codec, - client_remove_distributed_object_listener_codec, + client_remove_distributed_object_listener_codec, dynamic_config_add_vector_collection_config_codec, ) from hazelcast.internal.asyncio_proxy.manager import ( MAP_SERVICE, - ProxyManager, + ProxyManager, VECTOR_SERVICE, ) from hazelcast.internal.asyncio_proxy.base import Proxy from hazelcast.internal.asyncio_proxy.map import Map @@ -185,6 +186,37 @@ async def _start(self): 
async def get_map(self, name: str) -> Map[KeyType, ValueType]: return await self._proxy_manager.get_or_create(MAP_SERVICE, name) + async def create_vector_collection_config( + self, + name: str, + indexes: typing.List[IndexConfig], + backup_count: int = 1, + async_backup_count: int = 0, + split_brain_protection_name: typing.Optional[str] = None, + merge_policy: str = "PutIfAbsentMergePolicy", + merge_batch_size: int = 100, + ) -> None: + # check that indexes have different names + if indexes: + index_names = set(index.name for index in indexes) + if len(index_names) != len(indexes): + raise AssertionError("index names must be unique") + + request = dynamic_config_add_vector_collection_config_codec.encode_request( + name, + indexes, + backup_count, + async_backup_count, + split_brain_protection_name, + merge_policy, + merge_batch_size, + ) + invocation = Invocation(request, response_handler=lambda m: m) + await self._invocation_service.ainvoke(invocation) + + async def get_vector_collection(self, name: str) -> VectorCollection: + return await self._proxy_manager.get_or_create(VECTOR_SERVICE, name) + async def add_distributed_object_listener( self, listener_func: typing.Callable[[DistributedObjectEvent], None] ) -> str: diff --git a/hazelcast/internal/asyncio_proxy/manager.py b/hazelcast/internal/asyncio_proxy/manager.py index a5028addca..cd0fa20acf 100644 --- a/hazelcast/internal/asyncio_proxy/manager.py +++ b/hazelcast/internal/asyncio_proxy/manager.py @@ -1,5 +1,6 @@ import typing +from hazelcast.internal.asyncio_proxy.vector_collection import VectorCollection, create_vector_collection_proxy from hazelcast.protocol.codec import client_create_proxy_codec, client_destroy_proxy_codec from hazelcast.internal.asyncio_invocation import Invocation from hazelcast.internal.asyncio_proxy.base import Proxy @@ -7,12 +8,14 @@ from hazelcast.util import to_list MAP_SERVICE = "hz:impl:mapService" +VECTOR_SERVICE = "hz:service:vector" _proxy_init: typing.Dict[ str, typing.Callable[[str, str, typing.Any], typing.Coroutine[typing.Any, typing.Any, typing.Any]], ] = { MAP_SERVICE: create_map_proxy, + VECTOR_SERVICE: create_vector_collection_proxy, } diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index 5fd2ffc475..48d362988a 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -5,7 +5,6 @@ from hazelcast.aggregator import Aggregator from hazelcast.config import IndexUtil, IndexType, IndexConfig from hazelcast.core import SimpleEntryView -from hazelcast.errors import InvalidConfigurationError from hazelcast.projection import Projection from hazelcast.protocol import PagingPredicateHolder from hazelcast.protocol.codec import ( diff --git a/hazelcast/internal/asyncio_proxy/vector_collection.py b/hazelcast/internal/asyncio_proxy/vector_collection.py new file mode 100644 index 0000000000..d976bb63d6 --- /dev/null +++ b/hazelcast/internal/asyncio_proxy/vector_collection.py @@ -0,0 +1,256 @@ +import asyncio +import copy +import typing +import uuid +from typing import Any, Dict, List, Optional, Tuple + +from hazelcast.protocol.codec import ( + vector_collection_set_codec, + vector_collection_get_codec, + vector_collection_search_near_vector_codec, + vector_collection_delete_codec, + vector_collection_put_codec, + vector_collection_put_if_absent_codec, + vector_collection_remove_codec, + vector_collection_put_all_codec, + vector_collection_clear_codec, + vector_collection_optimize_codec, + vector_collection_size_codec, +) 
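+# Asyncio port of the VectorCollection proxy: the public coroutines below validate
+# their arguments and then await the *_internal helpers, which encode the codec
+# request and return the underlying invocation future.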
+from hazelcast.internal.asyncio_proxy.base import Proxy +from hazelcast.serialization.compact import SchemaNotReplicatedError +from hazelcast.serialization.data import Data +from hazelcast.types import KeyType, ValueType +from hazelcast.util import check_not_none +from hazelcast.vector import ( + Document, + SearchResult, + Vector, + VectorType, + VectorSearchOptions, +) + + +class VectorCollection(Proxy, typing.Generic[KeyType, ValueType]): + + def __init__(self, service_name, name, context): + super(VectorCollection, self).__init__(service_name, name, context) + + + async def get(self, key: Any) -> Document|None: + check_not_none(key, "key can't be None") + return await self._get_internal(key) + + async def set(self, key: Any, document: Document) -> None: + check_not_none(key, "key can't be None") + check_not_none(document, "document can't be None") + check_not_none(document.value, "document value can't be None") + return await self._set_internal(key, document) + + async def put(self, key: Any, document: Document) -> Document|None: + check_not_none(key, "key can't be None") + check_not_none(document, "document can't be None") + check_not_none(document.value, "document value can't be None") + return await self._put_internal(key, document) + + async def put_all(self, map: Dict[Any, Document]) -> None: + check_not_none(map, "map can't be None") + if not map: + return None + partition_service = self._context.partition_service + partition_map: Dict[int, List[Tuple[Data, Document]]] = {} + for key, doc in map.items(): + check_not_none(key, "key can't be None") + check_not_none(doc, "value can't be None") + doc = copy.copy(doc) + try: + entry = (self._to_data(key), doc) + doc.value = self._to_data(doc.value) + except SchemaNotReplicatedError as e: + return await self._send_schema_and_retry(e, self.put_all, map) + + partition_id = partition_service.get_partition_id(entry[0]) + partition_map.setdefault(partition_id, []).append(entry) + + async with asyncio.TaskGroup() as tg: + for partition_id, entry_list in partition_map.items(): + request = vector_collection_put_all_codec.encode_request(self.name, entry_list) + tg.create_task(self._ainvoke_on_partition(request, partition_id)) + + return None + + async def put_if_absent(self, key: Any, document: Document) -> Document|None: + check_not_none(key, "key can't be None") + check_not_none(document, "document can't be None") + check_not_none(document.value, "document value can't be None") + return await self._put_if_absent_internal(key, document) + + async def search_near_vector( + self, + vector: Vector, + *, + include_value: bool = False, + include_vectors: bool = False, + limit: int = 10, + hints: Dict[str, str] = None + ) -> List[SearchResult]: + check_not_none(vector, "vector can't be None") + if limit <= 0: + raise AssertionError("limit must be positive") + return await self._search_near_vector_internal( + vector, + include_value=include_value, + include_vectors=include_vectors, + limit=limit, + hints=hints, + ) + + async def remove(self, key: Any) -> Document|None: + check_not_none(key, "key can't be None") + return await self._remove_internal(key) + + async def delete(self, key: Any) -> None: + check_not_none(key, "key can't be None") + return await self._delete_internal(key) + + async def optimize(self, index_name: str = None) -> None: + request = vector_collection_optimize_codec.encode_request( + self.name, index_name, uuid.uuid4() + ) + return await self._invoke(request) + + async def clear(self) -> None: + request = 
vector_collection_clear_codec.encode_request(self.name) + return await self._invoke(request) + + async def size(self) -> int: + request = vector_collection_size_codec.encode_request(self.name) + return await self._invoke(request, vector_collection_size_codec.decode_response) + + def _set_internal(self, key: Any, document: Document) -> asyncio.Future[None]: + try: + key_data = self._to_data(key) + value_data = self._to_data(document.value) + except SchemaNotReplicatedError as e: + return self._send_schema_and_retry(e, self.set, key, document) + document = copy.copy(document) + document.value = value_data + request = vector_collection_set_codec.encode_request( + self.name, + key_data, + document, + ) + return self._invoke_on_key(request, key_data) + + def _get_internal(self, key: Any) -> asyncio.Future[Any]: + def handler(message): + doc = vector_collection_get_codec.decode_response(message) + return self._transform_document(doc) + + try: + key_data = self._to_data(key) + except SchemaNotReplicatedError as e: + return self._send_schema_and_retry(e, self.get, key) + request = vector_collection_get_codec.encode_request( + self.name, + key_data, + ) + return self._invoke_on_key(request, key_data, response_handler=handler) + + def _search_near_vector_internal( + self, + vector: Vector, + *, + include_value: bool = False, + include_vectors: bool = False, + limit: int = 10, + hints: Dict[str, str] = None + ) -> asyncio.Future[List[SearchResult]]: + def handler(message): + results: List[ + SearchResult + ] = vector_collection_search_near_vector_codec.decode_response(message) + for result in results: + if result.key is not None: + result.key = self._to_object(result.key) + if result.value is not None: + result.value = self._to_object(result.value) + if result.vectors: + for vec in result.vectors: + vec.type = VectorType(vec.type) + return results + + options = VectorSearchOptions( + include_value=include_value, + include_vectors=include_vectors, + limit=limit, + hints=hints or {}, + ) + request = vector_collection_search_near_vector_codec.encode_request( + self.name, + [vector], + options, + ) + return self._invoke(request, response_handler=handler) + + def _delete_internal(self, key: Any) -> asyncio.Future[None]: + key_data = self._to_data(key) + request = vector_collection_delete_codec.encode_request(self.name, key_data) + return self._invoke_on_key(request, key_data) + + def _remove_internal(self, key: Any) -> asyncio.Future[Document|None]: + def handler(message): + doc = vector_collection_remove_codec.decode_response(message) + return self._transform_document(doc) + + key_data = self._to_data(key) + request = vector_collection_remove_codec.encode_request(self.name, key_data) + return self._invoke_on_key(request, key_data, response_handler=handler) + + def _put_internal(self, key: Any, document: Document) -> asyncio.Future[Document|None]: + def handler(message): + doc = vector_collection_put_codec.decode_response(message) + return self._transform_document(doc) + + try: + key_data = self._to_data(key) + value_data = self._to_data(document.value) + except SchemaNotReplicatedError as e: + return self._send_schema_and_retry(e, self.set, key, document) + document = copy.copy(document) + document.value = value_data + request = vector_collection_put_codec.encode_request( + self.name, + key_data, + document, + ) + return self._invoke_on_key(request, key_data, response_handler=handler) + + def _put_if_absent_internal(self, key: Any, document: Document) -> asyncio.Future[Document|None]: + def 
handler(message): + doc = vector_collection_put_if_absent_codec.decode_response(message) + return self._transform_document(doc) + + try: + key_data = self._to_data(key) + value_data = self._to_data(document.value) + except SchemaNotReplicatedError as e: + return self._send_schema_and_retry(e, self.set, key, document) + document.value = value_data + request = vector_collection_put_if_absent_codec.encode_request( + self.name, + key_data, + document, + ) + return self._invoke_on_key(request, key_data, response_handler=handler) + + def _transform_document(self, doc: Optional[Document]) -> Optional[Document]: + if doc is not None: + if doc.value is not None: + doc.value = self._to_object(doc.value) + for vec in doc.vectors: + vec.type = VectorType(vec.type) + return doc + +async def create_vector_collection_proxy(service_name, name, context): + return VectorCollection(service_name, name, context) diff --git a/tests/integration/asyncio/proxy/vector_collection_test.py b/tests/integration/asyncio/proxy/vector_collection_test.py new file mode 100644 index 0000000000..cb094d4f74 --- /dev/null +++ b/tests/integration/asyncio/proxy/vector_collection_test.py @@ -0,0 +1,337 @@ +import os +import unittest + +import pytest + +import hazelcast.errors +from tests.integration.asyncio.base import SingleMemberTestCase +from tests.util import ( + random_string, + compare_client_version, + skip_if_server_version_older_than, + skip_if_client_version_older_than, +) + +try: + from hazelcast.vector import IndexConfig, Metric, Document, Vector, Type +except ImportError: + # backward compatibility + pass + + +@unittest.skipIf( + compare_client_version("5.5") < 0, "Tests the features added in 5.5 version of the client" +) +@pytest.mark.enterprise +class VectorCollectionTest(SingleMemberTestCase): + @classmethod + def configure_cluster(cls): + path = os.path.abspath(__file__) + dir_path = os.path.dirname(path) + with open(os.path.join(dir_path, "../../backward_compatible/proxy/hazelcast.xml")) as f: + return f.read() + + @classmethod + def configure_client(cls, config): + config["cluster_name"] = cls.cluster.id + return config + + async def asyncSetUp(self): + await super().asyncSetUp() + skip_if_server_version_older_than(self, self.client, "5.5") + name = random_string() + await self.client.create_vector_collection_config(name, [IndexConfig("vector", Metric.COSINE, 3)]) + self.vector_collection = await self.client.get_vector_collection(name) + + async def asyncTearDown(self): + await self.vector_collection.destroy() + await super().asyncTearDown() + + async def test_set(self): + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + + async def test_get(self): + doc = await self.vector_collection.get("k1") + self.assertIsNone(doc) + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + got_doc = await self.vector_collection.get("k1") + self.assert_document_equal(got_doc, doc) + + async def test_put(self): + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + doc_old = await self.vector_collection.put("k1", doc) + self.assertIsNone(doc_old) + doc2 = Document("v1", Vector("vector", Type.DENSE, [0.4, 0.5, 0.6])) + doc_old = await self.vector_collection.put("k1", doc2) + self.assert_document_equal(doc_old, doc) + + async def test_delete(self): + doc = await self.vector_collection.get("k1") + self.assertIsNone(doc) + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + 
await self.vector_collection.set("k1", doc) + await self.vector_collection.delete("k1") + doc = await self.vector_collection.get("k1") + self.assertIsNone(doc) + + async def test_remove(self): + doc = await self.vector_collection.get("k1") + self.assertIsNone(doc) + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + doc2 = await self.vector_collection.remove("k1") + self.assert_document_equal(doc, doc2) + + async def test_put_all(self): + doc1 = self.doc1("v1", [0.1, 0.2, 0.3]) + doc2 = self.doc1("v1", [0.2, 0.3, 0.4]) + await self.vector_collection.put_all( + { + "k1": doc1, + "k2": doc2, + } + ) + k1 = await self.vector_collection.get("k1") + self.assert_document_equal(k1, doc1) + k2 = await self.vector_collection.get("k2") + self.assert_document_equal(k2, doc2) + + async def test_clear(self): + doc = await self.vector_collection.get("k1") + self.assertIsNone(doc) + doc = Document("v1", self.vec1([0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + await self.vector_collection.clear() + doc = await self.vector_collection.get("k1") + self.assertIsNone(doc) + + async def test_optimize(self): + doc = Document("v1", self.vec1([0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + # it is hard to observe results of optimize, so just test that the invocation works + await self.vector_collection.optimize() + + async def test_optimize_with_name(self): + doc = Document("v1", self.vec1([0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + # it is hard to observe results of optimize, so just test that the invocation works + await self.vector_collection.optimize("vector") + + async def test_search_near_vector_include_all(self): + target_doc = self.doc1("v1", [0.3, 0.4, 0.5]) + await self.vector_collection.put_all( + { + "k1": self.doc1("v1", [0.1, 0.2, 0.3]), + "k2": self.doc1("v1", [0.2, 0.3, 0.4]), + "k3": target_doc, + } + ) + result = await self.vector_collection.search_near_vector( + self.vec1([0.2, 0.2, 0.3]), limit=1, include_vectors=True, include_value=True + ) + self.assertEqual(1, len(result)) + self.assert_document_equal(target_doc, result[0]) + self.assertAlmostEqual(0.9973459243774414, result[0].score) + + async def test_search_near_vector_include_none(self): + target_doc = self.doc1("v1", [0.3, 0.4, 0.5]) + await self.vector_collection.put_all( + { + "k1": self.doc1("v1", [0.1, 0.2, 0.3]), + "k2": self.doc1("v1", [0.2, 0.3, 0.4]), + "k3": target_doc, + } + ) + result = await self.vector_collection.search_near_vector( + self.vec1([0.2, 0.2, 0.3]), limit=1, include_vectors=False, include_value=False + ) + self.assertEqual(1, len(result)) + result1 = result[0] + self.assertAlmostEqual(0.9973459243774414, result1.score) + self.assertIsNone(result1.value) + self.assertIsNone(result1.vectors) + + async def test_search_near_vector_hint(self): + # not empty collection is needed for search to do something + doc = Document("v1", self.vec1([0.1, 0.2, 0.3])) + await self.vector_collection.set("k1", doc) + # trigger validation error to check if hint was sent + with self.assertRaises(hazelcast.errors.IllegalArgumentError): + await self.vector_collection.search_near_vector( + self.vec1([0.2, 0.2, 0.3]), + limit=1, + include_vectors=False, + include_value=False, + hints={"partitionLimit": "-1"}, + ) + + async def test_size(self): + self.assertEqual(await self.vector_collection.size(), 0) + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + await self.vector_collection.put("k1", doc) 
+ self.assertEqual(await self.vector_collection.size(), 1) + await self.vector_collection.clear() + self.assertEqual(await self.vector_collection.size(), 0) + + async def test_backup_count_valid_values_pass(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)], backup_count=2, async_backup_count=2 + ) + await self.client.get_vector_collection(name) + + async def test_backup_count_max_value_pass(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)], backup_count=6 + ) + await self.client.get_vector_collection(name) + + async def test_backup_count_min_value_pass(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)], backup_count=0 + ) + await self.client.get_vector_collection(name) + + async def test_backup_count_more_than_max_value_fail(self): + skip_if_server_version_older_than(self, self.client, "6.0") + name = random_string() + # check that the parameter is used by ensuring that it is validated on server side + # there is no simple way to check number of backups + with self.assertRaises(hazelcast.errors.IllegalArgumentError): + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + backup_count=7, + async_backup_count=0, + ) + + async def test_backup_count_less_than_min_value_fail(self): + skip_if_server_version_older_than(self, self.client, "6.0") + name = random_string() + with self.assertRaises(hazelcast.errors.IllegalArgumentError): + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)], backup_count=-1 + ) + + async def test_async_backup_count_max_value_pass(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + backup_count=0, + async_backup_count=6, + ) + await self.client.get_vector_collection(name) + + async def test_async_backup_count_min_value_pass(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)], async_backup_count=0 + ) + await self.client.get_vector_collection(name) + + async def test_async_backup_count_more_than_max_value_fail(self): + skip_if_server_version_older_than(self, self.client, "6.0") + name = random_string() + # check that the parameter is used by ensuring that it is validated on server side + # there is no simple way to check number of backups + with self.assertRaises(hazelcast.errors.IllegalArgumentError): + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + backup_count=0, + async_backup_count=7, + ) + + async def test_async_backup_count_less_than_min_value_fail(self): + skip_if_server_version_older_than(self, self.client, "6.0") + name = random_string() + with self.assertRaises(hazelcast.errors.IllegalArgumentError): + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + async_backup_count=-1, + ) + + async def test_sync_and_async_backup_count_more_than_max_value_fail(self): + 
skip_if_server_version_older_than(self, self.client, "6.0") + name = random_string() + with self.assertRaises(hazelcast.errors.IllegalArgumentError): + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + backup_count=4, + async_backup_count=3, + ) + + async def test_merge_policy_can_be_sent(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + merge_policy="DiscardMergePolicy", + merge_batch_size=1000, + ) + # validation happens when the collection proxy is created + self.client.get_vector_collection(name) + + async def test_wrong_merge_policy_fails(self): + skip_if_client_version_older_than(self, "6.0") + skip_if_server_version_older_than(self, self.client, "6.0") + name = random_string() + with self.assertRaises(hazelcast.errors.InvalidConfigurationError): + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)], merge_policy="non-existent" + ) + # validation happens when the collection proxy is created + await self.client.get_vector_collection(name) + + async def test_split_brain_name_can_be_sent(self): + skip_if_client_version_older_than(self, "6.0") + name = random_string() + await self.client.create_vector_collection_config( + name, + [IndexConfig("vector", Metric.COSINE, 3)], + # wrong name will be ignored + split_brain_protection_name="non-existent", + ) + col = await self.client.get_vector_collection(name) + doc = Document("v1", Vector("vector", Type.DENSE, [0.1, 0.2, 0.3])) + await col.set("k1", doc) + + def assert_document_equal(self, doc1, doc2) -> None: + self.assertEqual(doc1.value, doc2.value) + self.assertEqual(len(doc1.vectors), len(doc2.vectors)) + # currently there's a bug on the server-side about vector names. 
+ # if there's a single vector, its name is not returned + # see: https://hazelcast.atlassian.net/browse/HZAI-67 + # working around that for now + skip_check_name = len(doc1.vectors) == 1 + for i in range(len(doc1.vectors)): + self.assert_vector_equal(doc1.vectors[i], doc2.vectors[i], skip_check_name) + + def assert_vector_equal(self, vec1, vec2, skip_check_name=False): + if not skip_check_name: + self.assertEqual(vec1.name, vec2.name) + self.assertEqual(vec1.type, vec2.type) + self.assertEqual(len(vec1.vector), len(vec2.vector)) + for i in range(len(vec1.vector)): + self.assertAlmostEqual(vec1.vector[i], vec2.vector[i]) + + @classmethod + def vec1(cls, elems): + return Vector("vector", Type.DENSE, elems) + + @classmethod + def doc1(cls, value, vector_elems): + return Document(value, cls.vec1(vector_elems)) From bfb805d2d9d6cd7264bacc1e7151168057e68c63 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 19 Nov 2025 12:45:28 +0300 Subject: [PATCH 40/51] Black --- hazelcast/asyncio/client.py | 6 ++++-- hazelcast/internal/asyncio_proxy/manager.py | 5 ++++- .../asyncio_proxy/vector_collection.py | 21 ++++++++++--------- .../asyncio/proxy/vector_collection_test.py | 4 +++- 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/hazelcast/asyncio/client.py b/hazelcast/asyncio/client.py index 1f8c5e9bb8..6920eef361 100644 --- a/hazelcast/asyncio/client.py +++ b/hazelcast/asyncio/client.py @@ -20,11 +20,13 @@ from hazelcast.protocol.codec import ( client_add_distributed_object_listener_codec, client_get_distributed_objects_codec, - client_remove_distributed_object_listener_codec, dynamic_config_add_vector_collection_config_codec, + client_remove_distributed_object_listener_codec, + dynamic_config_add_vector_collection_config_codec, ) from hazelcast.internal.asyncio_proxy.manager import ( MAP_SERVICE, - ProxyManager, VECTOR_SERVICE, + ProxyManager, + VECTOR_SERVICE, ) from hazelcast.internal.asyncio_proxy.base import Proxy from hazelcast.internal.asyncio_proxy.map import Map diff --git a/hazelcast/internal/asyncio_proxy/manager.py b/hazelcast/internal/asyncio_proxy/manager.py index cd0fa20acf..9daeca0e1f 100644 --- a/hazelcast/internal/asyncio_proxy/manager.py +++ b/hazelcast/internal/asyncio_proxy/manager.py @@ -1,6 +1,9 @@ import typing -from hazelcast.internal.asyncio_proxy.vector_collection import VectorCollection, create_vector_collection_proxy +from hazelcast.internal.asyncio_proxy.vector_collection import ( + VectorCollection, + create_vector_collection_proxy, +) from hazelcast.protocol.codec import client_create_proxy_codec, client_destroy_proxy_codec from hazelcast.internal.asyncio_invocation import Invocation from hazelcast.internal.asyncio_proxy.base import Proxy diff --git a/hazelcast/internal/asyncio_proxy/vector_collection.py b/hazelcast/internal/asyncio_proxy/vector_collection.py index d976bb63d6..ef12e032af 100644 --- a/hazelcast/internal/asyncio_proxy/vector_collection.py +++ b/hazelcast/internal/asyncio_proxy/vector_collection.py @@ -32,12 +32,10 @@ class VectorCollection(Proxy, typing.Generic[KeyType, ValueType]): - def __init__(self, service_name, name, context): super(VectorCollection, self).__init__(service_name, name, context) - - async def get(self, key: Any) -> Document|None: + async def get(self, key: Any) -> Document | None: check_not_none(key, "key can't be None") return await self._get_internal(key) @@ -47,7 +45,7 @@ async def set(self, key: Any, document: Document) -> None: check_not_none(document.value, "document value can't be None") return await 
self._set_internal(key, document) - async def put(self, key: Any, document: Document) -> Document|None: + async def put(self, key: Any, document: Document) -> Document | None: check_not_none(key, "key can't be None") check_not_none(document, "document can't be None") check_not_none(document.value, "document value can't be None") @@ -72,14 +70,14 @@ async def put_all(self, map: Dict[Any, Document]) -> None: partition_id = partition_service.get_partition_id(entry[0]) partition_map.setdefault(partition_id, []).append(entry) - async with asyncio.TaskGroup() as tg: + async with asyncio.TaskGroup() as tg: # type: ignore[attr-defined] for partition_id, entry_list in partition_map.items(): request = vector_collection_put_all_codec.encode_request(self.name, entry_list) tg.create_task(self._ainvoke_on_partition(request, partition_id)) return None - async def put_if_absent(self, key: Any, document: Document) -> Document|None: + async def put_if_absent(self, key: Any, document: Document) -> Document | None: check_not_none(key, "key can't be None") check_not_none(document, "document can't be None") check_not_none(document.value, "document value can't be None") @@ -105,7 +103,7 @@ async def search_near_vector( hints=hints, ) - async def remove(self, key: Any) -> Document|None: + async def remove(self, key: Any) -> Document | None: check_not_none(key, "key can't be None") return await self._remove_internal(key) @@ -198,7 +196,7 @@ def _delete_internal(self, key: Any) -> asyncio.Future[None]: request = vector_collection_delete_codec.encode_request(self.name, key_data) return self._invoke_on_key(request, key_data) - def _remove_internal(self, key: Any) -> asyncio.Future[Document|None]: + def _remove_internal(self, key: Any) -> asyncio.Future[Document | None]: def handler(message): doc = vector_collection_remove_codec.decode_response(message) return self._transform_document(doc) @@ -207,7 +205,7 @@ def handler(message): request = vector_collection_remove_codec.encode_request(self.name, key_data) return self._invoke_on_key(request, key_data, response_handler=handler) - def _put_internal(self, key: Any, document: Document) -> asyncio.Future[Document|None]: + def _put_internal(self, key: Any, document: Document) -> asyncio.Future[Document | None]: def handler(message): doc = vector_collection_put_codec.decode_response(message) return self._transform_document(doc) @@ -226,7 +224,9 @@ def handler(message): ) return self._invoke_on_key(request, key_data, response_handler=handler) - def _put_if_absent_internal(self, key: Any, document: Document) -> asyncio.Future[Document|None]: + def _put_if_absent_internal( + self, key: Any, document: Document + ) -> asyncio.Future[Document | None]: def handler(message): doc = vector_collection_put_if_absent_codec.decode_response(message) return self._transform_document(doc) @@ -252,5 +252,6 @@ def _transform_document(self, doc: Optional[Document]) -> Optional[Document]: vec.type = VectorType(vec.type) return doc + async def create_vector_collection_proxy(service_name, name, context): return VectorCollection(service_name, name, context) diff --git a/tests/integration/asyncio/proxy/vector_collection_test.py b/tests/integration/asyncio/proxy/vector_collection_test.py index cb094d4f74..5523821b9e 100644 --- a/tests/integration/asyncio/proxy/vector_collection_test.py +++ b/tests/integration/asyncio/proxy/vector_collection_test.py @@ -40,7 +40,9 @@ async def asyncSetUp(self): await super().asyncSetUp() skip_if_server_version_older_than(self, self.client, "5.5") name = random_string() 
- await self.client.create_vector_collection_config(name, [IndexConfig("vector", Metric.COSINE, 3)]) + await self.client.create_vector_collection_config( + name, [IndexConfig("vector", Metric.COSINE, 3)] + ) self.vector_collection = await self.client.get_vector_collection(name) async def asyncTearDown(self): From 91bf1d1d9ea318980bc8120540047c1d730b4eac Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 21 Nov 2025 10:45:58 +0300 Subject: [PATCH 41/51] Addressed review comment --- hazelcast/internal/asyncio_connection.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index c78736cb2a..f89bd39db7 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -70,12 +70,12 @@ def __init__(self, initial_backoff, max_backoff, multiplier, cluster_connect_tim def reset(self): self._attempt = 0 - self._cluster_connect_attempt_begin = time.time() + self._cluster_connect_attempt_begin = asyncio.get_running_loop().time() self._current_backoff = min(self._max_backoff, self._initial_backoff) - def sleep(self): + async def sleep(self): self._attempt += 1 - time_passed = time.time() - self._cluster_connect_attempt_begin + time_passed = asyncio.get_running_loop().time() - self._cluster_connect_attempt_begin if time_passed > self._cluster_connect_timeout: _logger.warning( "Unable to get live cluster connection, cluster connect timeout (%s) is reached. " @@ -98,7 +98,7 @@ def sleep(self): self._cluster_connect_timeout_text, self._max_backoff, ) - time.sleep(sleep_time) + await asyncio.sleep(sleep_time) self._current_backoff = min(self._current_backoff * self._multiplier, self._max_backoff) return True @@ -547,7 +547,7 @@ async def _sync_connect_to_cluster(self): if not tried_addresses_per_attempt: self._check_client_active() - if not self._wait_strategy.sleep(): + if not await self._wait_strategy.sleep(): break except (ClientNotAllowedInClusterError, InvalidConfigurationError): cluster_name = self._config.cluster_name From 2128f5e6f0865600c5703a9e2304fb721f565a02 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 21 Nov 2025 12:52:11 +0300 Subject: [PATCH 42/51] Removed unnecessary code --- hazelcast/internal/asyncio_proxy/base.py | 53 ------------------------ 1 file changed, 53 deletions(-) diff --git a/hazelcast/internal/asyncio_proxy/base.py b/hazelcast/internal/asyncio_proxy/base.py index 60fb8de4ac..e0de6e1183 100644 --- a/hazelcast/internal/asyncio_proxy/base.py +++ b/hazelcast/internal/asyncio_proxy/base.py @@ -104,32 +104,6 @@ def _invoke(self, request, response_handler=_no_op_response_handler): return invocation.future -class TransactionalProxy: - """Provides an interface for all transactional distributed objects.""" - - def __init__(self, name, transaction, context): - self.name = name - self.transaction = transaction - self._invocation_service = context.invocation_service - serialization_service = context.serialization_service - self._to_object = serialization_service.to_object - self._to_data = serialization_service.to_data - self._send_schema_and_retry = context.compact_schema_service.send_schema_and_retry - - def _send_schema(self, error): - return self._send_schema_and_retry(error, lambda: None).result() - - def _invoke(self, request, response_handler=_no_op_response_handler): - invocation = Invocation( - request, connection=self.transaction.connection, response_handler=response_handler - ) - self._invocation_service.invoke(invocation) 
- return invocation.future.result() - - def __repr__(self): - return '%s(name="%s")' % (type(self).__name__, self.name) - - class ItemEventType: """Type of item events.""" @@ -262,33 +236,6 @@ def __repr__(self): ) -class TopicMessage(typing.Generic[MessageType]): - """Topic message. - - Attributes: - name: Name of the proxy that fired the event. - message: The message sent to Topic. - publish_time: UNIX time that the event is published as seconds. - member: Member that fired the event. - """ - - __slots__ = ("name", "message", "publish_time", "member") - - def __init__(self, name: str, message: MessageType, publish_time: int, member: MemberInfo): - self.name = name - self.message = message - self.publish_time = publish_time - self.member = member - - def __repr__(self): - return "TopicMessage(message=%s, publish_time=%s, topic_name=%s, publishing_member=%s)" % ( - self.message, - self.publish_time, - self.name, - self.member, - ) - - def get_entry_listener_flags(**kwargs): flags = 0 for key, value in kwargs.items(): From 62697e31e9f696841c23a412e6174a6d40a5b05a Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 21 Nov 2025 17:10:29 +0300 Subject: [PATCH 43/51] Add BETA warning --- hazelcast/asyncio/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hazelcast/asyncio/__init__.py b/hazelcast/asyncio/__init__.py index 6137aac760..dc8d7dbbf1 100644 --- a/hazelcast/asyncio/__init__.py +++ b/hazelcast/asyncio/__init__.py @@ -1,2 +1,10 @@ +import warnings + +warnings.warn("Asyncio API for Hazelcast Python Client is in BETA. DO NOT use it in production.") +del warnings + +__all__ = ["HazelcastClient", "Map"] + from hazelcast.asyncio.client import HazelcastClient from hazelcast.internal.asyncio_proxy.map import Map + From a87a5c6fc88d7abd3cba844819739dd306ecc14b Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Fri, 21 Nov 2025 17:12:08 +0300 Subject: [PATCH 44/51] Black --- hazelcast/asyncio/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hazelcast/asyncio/__init__.py b/hazelcast/asyncio/__init__.py index dc8d7dbbf1..52f7e20844 100644 --- a/hazelcast/asyncio/__init__.py +++ b/hazelcast/asyncio/__init__.py @@ -7,4 +7,3 @@ from hazelcast.asyncio.client import HazelcastClient from hazelcast.internal.asyncio_proxy.map import Map - From 767bfd58d96649bf8d3eedddc18238c783a1dec3 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Mon, 24 Nov 2025 13:25:53 +0300 Subject: [PATCH 45/51] Fix test_map_smart_listener_local_only --- tests/integration/asyncio/smart_listener_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/asyncio/smart_listener_test.py b/tests/integration/asyncio/smart_listener_test.py index ade74ccd74..18ca118f2e 100644 --- a/tests/integration/asyncio/smart_listener_test.py +++ b/tests/integration/asyncio/smart_listener_test.py @@ -38,8 +38,8 @@ async def test_map_smart_listener_local_only(self): map = await self.client.get_map(random_string()) await map.add_entry_listener(added_func=self.collector) await map.put("key", "value") - self.assert_event_received_once() + await self.assert_event_received_once() - def assert_event_received_once(self): - asyncio.sleep(2) + async def assert_event_received_once(self): + await asyncio.sleep(2) self.assertEqual(1, len(self.collector.events)) From e6736795dc7f65e699e6625184ac6608f66671c7 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 25 Nov 2025 14:44:23 +0300 Subject: [PATCH 46/51] Updated test_heartbeat_stopped_and_restored --- Makefile | 4 ++-- 
tests/integration/asyncio/heartbeat_test.py | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index d440d03503..e1032e5525 100644 --- a/Makefile +++ b/Makefile @@ -5,10 +5,10 @@ check: black --check --config black.toml . test: - pytest -m "not enterprise" + pytest --verbose -m "not enterprise" test-enterprise: - pytest + pytest --verbose test-cover: pytest --cov=hazelcast --cov-report=xml diff --git a/tests/integration/asyncio/heartbeat_test.py b/tests/integration/asyncio/heartbeat_test.py index 3f9015363a..5fbdd72b3f 100644 --- a/tests/integration/asyncio/heartbeat_test.py +++ b/tests/integration/asyncio/heartbeat_test.py @@ -33,9 +33,7 @@ async def asyncTearDown(self): self.rc.shutdownCluster(self.cluster.id) async def test_heartbeat_stopped_and_restored(self): - member2 = self.rc.startMember(self.cluster.id) - # TODO: remove this - await asyncio.sleep(1) + member2 = await asyncio.to_thread(self.rc.startMember, self.cluster.id) addr = Address(member2.host, member2.port) await wait_for_partition_table(self.client) await open_connection_to_address(self.client, member2.uuid) @@ -56,7 +54,7 @@ def collector(c, *_): ) assertion_succeeded = False - def run(): + async def run(): nonlocal assertion_succeeded # It is possible for client to override the set last_read_time # of the connection, in case of the periodically sent heartbeat @@ -74,10 +72,9 @@ def run(): connection.last_read_time -= 2 break - asyncio.sleep((i + 1) * 0.1) + await asyncio.sleep((i + 1) * 0.1) - simulation_thread = threading.Thread(target=run) - simulation_thread.start() + asyncio.create_task(run()) async def assert_heartbeat_stopped_and_restored(): nonlocal assertion_succeeded @@ -98,4 +95,3 @@ async def assert_heartbeat_stopped_and_restored(): assertion_succeeded = True await self.assertTrueEventually(assert_heartbeat_stopped_and_restored) - simulation_thread.join() From 319bb3500b165e5720e8ee41da7291d3929cc24c Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 25 Nov 2025 15:52:00 +0300 Subject: [PATCH 47/51] Fixed tests --- hazelcast/internal/asyncio_proxy/base.py | 7 ++-- hazelcast/internal/asyncio_proxy/map.py | 8 ++--- tests/integration/asyncio/listener_test.py | 33 ++++++++++++------- tests/integration/asyncio/predicate_test.py | 2 +- .../asyncio/proxy/map_nearcache_test.py | 2 +- .../asyncio/smart_listener_test.py | 6 ++-- 6 files changed, 33 insertions(+), 25 deletions(-) diff --git a/hazelcast/internal/asyncio_proxy/base.py b/hazelcast/internal/asyncio_proxy/base.py index e0de6e1183..71e4a4a0ea 100644 --- a/hazelcast/internal/asyncio_proxy/base.py +++ b/hazelcast/internal/asyncio_proxy/base.py @@ -41,10 +41,11 @@ async def destroy(self) -> bool: ``True`` if this proxy is destroyed successfully, ``False`` otherwise. 
""" - self._on_destroy() - return await self._context.proxy_manager.destroy_proxy(self.service_name, self.name) + async with asyncio.TaskGroup() as tg: + tg.create_task(self._on_destroy()) + return await tg.create_task(self._context.proxy_manager.destroy_proxy(self.service_name, self.name)) - def _on_destroy(self): + async def _on_destroy(self): pass def __repr__(self) -> str: diff --git a/hazelcast/internal/asyncio_proxy/map.py b/hazelcast/internal/asyncio_proxy/map.py index 5fd2ffc475..a7e20bed55 100644 --- a/hazelcast/internal/asyncio_proxy/map.py +++ b/hazelcast/internal/asyncio_proxy/map.py @@ -979,8 +979,6 @@ def __init__(self, service_name, name, context): super(MapFeatNearCache, self).__init__(service_name, name, context) self._invalidation_listener_id = None self._near_cache = context.near_cache_manager.get_or_create_near_cache(name) - if self._near_cache.invalidate_on_change: - self._add_near_cache_invalidation_listener() async def clear(self): self._near_cache._clear() @@ -995,10 +993,10 @@ async def load_all(self, keys=None, replace_existing_values=True): self._near_cache.clear() return await super(MapFeatNearCache, self).load_all(keys, replace_existing_values) - def _on_destroy(self): - self._remove_near_cache_invalidation_listener() + async def _on_destroy(self): + await self._remove_near_cache_invalidation_listener() self._near_cache.clear() - super(MapFeatNearCache, self)._on_destroy() + await super(MapFeatNearCache, self)._on_destroy() async def _add_near_cache_invalidation_listener(self): codec = map_add_near_cache_invalidation_listener_codec diff --git a/tests/integration/asyncio/listener_test.py b/tests/integration/asyncio/listener_test.py index 758ed7244e..da13a25438 100644 --- a/tests/integration/asyncio/listener_test.py +++ b/tests/integration/asyncio/listener_test.py @@ -42,16 +42,21 @@ async def asyncTearDown(self): self.rc.terminateCluster(self.cluster.id) self.rc.exit() - @parameterized.expand(LISTENER_TYPES) - async def test_remove_member(self, _, is_smart): + async def test_remove_member_smart(self): + await self._remove_member_test(True) + + async def test_remove_member_unisocket(self): + await self._remove_member_test(False) + + async def _remove_member_test(self, is_smart): self.client_config["smart_routing"] = is_smart client = await self.create_client(self.client_config) await wait_for_partition_table(client) key_m1 = generate_key_owned_by_instance(client, self.m1.uuid) - random_map = await client.get_map(random_string()).blocking() - random_map.add_entry_listener(added_func=self.collector) - self.m1.shutdown() - random_map.put(key_m1, "value2") + random_map = await client.get_map(random_string()) + await random_map.add_entry_listener(added_func=self.collector) + await asyncio.to_thread(self.m1.shutdown) + await random_map.put(key_m1, "value2") def assert_event(): self.assertEqual(1, len(self.collector.events)) @@ -74,16 +79,20 @@ async def asyncTearDown(self): self.rc.terminateCluster(self.cluster.id) self.rc.exit() - @parameterized.expand(LISTENER_TYPES) - async def test_add_member(self, _, is_smart): + async def test_add_member_smart(self): + await self._add_member_test(True) + + async def test_add_member_unisocket(self): + await self._add_member_test(True) + + async def _add_member_test(self, is_smart): self.client_config["smart_routing"] = is_smart client = await self.create_client(self.client_config) - random_map = await client.get_map(random_string()).blocking() - random_map.add_entry_listener(added_func=self.collector, updated_func=self.collector) 
- m2 = self.cluster.start_member() + random_map = await client.get_map(random_string()) + await random_map.add_entry_listener(added_func=self.collector, updated_func=self.collector) + m2 = await asyncio.to_thread(self.cluster.start_member) await wait_for_partition_table(client) key_m2 = generate_key_owned_by_instance(client, m2.uuid) - assertion_succeeded = False async def run(): diff --git a/tests/integration/asyncio/predicate_test.py b/tests/integration/asyncio/predicate_test.py index f872548037..4b7473a6f3 100644 --- a/tests/integration/asyncio/predicate_test.py +++ b/tests/integration/asyncio/predicate_test.py @@ -200,7 +200,7 @@ async def asyncSetUp(self): await super().asyncSetUp() self.map = await self.client.get_map(random_string()) - async def tearDown(self): + async def asyncTearDown(self): await self.map.destroy() await super().asyncTearDown() diff --git a/tests/integration/asyncio/proxy/map_nearcache_test.py b/tests/integration/asyncio/proxy/map_nearcache_test.py index f0bee878eb..9665ad1a15 100644 --- a/tests/integration/asyncio/proxy/map_nearcache_test.py +++ b/tests/integration/asyncio/proxy/map_nearcache_test.py @@ -170,7 +170,7 @@ def collector(e): self.client = client self.map = map - async def tearDown(self): + async def asyncTearDown(self): await self.client.shutdown() async def test_get_existing_key_from_cache_when_the_cluster_is_down(self): diff --git a/tests/integration/asyncio/smart_listener_test.py b/tests/integration/asyncio/smart_listener_test.py index ade74ccd74..18ca118f2e 100644 --- a/tests/integration/asyncio/smart_listener_test.py +++ b/tests/integration/asyncio/smart_listener_test.py @@ -38,8 +38,8 @@ async def test_map_smart_listener_local_only(self): map = await self.client.get_map(random_string()) await map.add_entry_listener(added_func=self.collector) await map.put("key", "value") - self.assert_event_received_once() + await self.assert_event_received_once() - def assert_event_received_once(self): - asyncio.sleep(2) + async def assert_event_received_once(self): + await asyncio.sleep(2) self.assertEqual(1, len(self.collector.events)) From 8e325ea0226914088b55c42dde08e6024ad41e4a Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 25 Nov 2025 15:55:05 +0300 Subject: [PATCH 48/51] Linter --- hazelcast/internal/asyncio_proxy/base.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hazelcast/internal/asyncio_proxy/base.py b/hazelcast/internal/asyncio_proxy/base.py index 71e4a4a0ea..4d4ba8b4e1 100644 --- a/hazelcast/internal/asyncio_proxy/base.py +++ b/hazelcast/internal/asyncio_proxy/base.py @@ -41,9 +41,11 @@ async def destroy(self) -> bool: ``True`` if this proxy is destroyed successfully, ``False`` otherwise. 
""" - async with asyncio.TaskGroup() as tg: + async with asyncio.TaskGroup() as tg: # type: ignore[attr-defined] tg.create_task(self._on_destroy()) - return await tg.create_task(self._context.proxy_manager.destroy_proxy(self.service_name, self.name)) + return await tg.create_task( + self._context.proxy_manager.destroy_proxy(self.service_name, self.name) + ) async def _on_destroy(self): pass From eed53b33e831cabdc60fa6f086cd498a80ca0d36 Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Tue, 25 Nov 2025 16:21:51 +0300 Subject: [PATCH 49/51] Test updates --- tests/integration/asyncio/reconnect_test.py | 39 +++++++++++---------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/tests/integration/asyncio/reconnect_test.py b/tests/integration/asyncio/reconnect_test.py index 5b5887c42a..ed09afabf7 100644 --- a/tests/integration/asyncio/reconnect_test.py +++ b/tests/integration/asyncio/reconnect_test.py @@ -37,7 +37,7 @@ async def test_start_client_with_no_member(self): async def test_start_client_before_member(self): async def run(): await asyncio.sleep(1.0) - self.cluster.start_member() + await asyncio.to_thread(self.cluster.start_member) asyncio.create_task(run()) await self.create_client( @@ -48,7 +48,7 @@ async def run(): ) async def test_restart_member(self): - member = self.cluster.start_member() + member = await asyncio.to_thread(self.cluster.start_member) client = await self.create_client( { "cluster_name": self.cluster.id, @@ -63,30 +63,29 @@ def listener(s): client.lifecycle_service.add_listener(listener) - member.shutdown() + await asyncio.to_thread(member.shutdown) await self.assertTrueEventually( lambda: self.assertEqual(state[0], LifecycleState.DISCONNECTED) ) - self.cluster.start_member() + await asyncio.to_thread(self.cluster.start_member) await self.assertTrueEventually( lambda: self.assertEqual(state[0], LifecycleState.CONNECTED) ) async def test_listener_re_register(self): - member = self.cluster.start_member() + member = await asyncio.to_thread(self.cluster.start_member) client = await self.create_client( { "cluster_name": self.cluster.id, "cluster_connect_timeout": 5.0, } ) - map = await client.get_map("map") collector = event_collector() reg_id = await map.add_entry_listener(added_func=collector) self.logger.info("Registered listener with id %s", reg_id) - member.shutdown() - self.cluster.start_member() + await asyncio.to_thread(member.shutdown) + await asyncio.to_thread(self.cluster.start_member) count = AtomicInteger() async def assert_events(): @@ -104,15 +103,15 @@ async def assert_events(): await self.assertTrueEventually(assert_events) async def test_member_list_after_reconnect(self): - old_member = self.cluster.start_member() + old_member = await asyncio.to_thread(self.cluster.start_member) client = await self.create_client( { "cluster_name": self.cluster.id, "cluster_connect_timeout": 5.0, } ) - old_member.shutdown() - new_member = self.cluster.start_member() + await asyncio.to_thread(old_member.shutdown) + new_member = await asyncio.to_thread(self.cluster.start_member) def assert_member_list(): members = client.cluster_service.get_members() @@ -122,7 +121,7 @@ def assert_member_list(): await self.assertTrueEventually(assert_member_list) async def test_reconnect_toNewNode_ViaLastMemberList(self): - old_member = self.cluster.start_member() + old_member = await asyncio.to_thread(self.cluster.start_member) client = await self.create_client( { "cluster_name": self.cluster.id, @@ -133,8 +132,8 @@ async def test_reconnect_toNewNode_ViaLastMemberList(self): 
"cluster_connect_timeout": 10.0, } ) - new_member = self.cluster.start_member() - old_member.shutdown() + new_member = await asyncio.to_thread(self.cluster.start_member) + await asyncio.to_thread(old_member.shutdown) def assert_member_list(): members = client.cluster_service.get_members() @@ -201,8 +200,10 @@ async def test_listeners_after_client_disconnected_with_member_ip_client_hostnam await self._verify_listeners_after_client_disconnected("127.0.0.1", "localhost") async def _verify_connection_count_after_reconnect(self, member_address, client_address): - cluster = self.create_cluster(self.rc, self._create_cluster_config(member_address)) - member = cluster.start_member() + cluster = await asyncio.to_thread( + self.create_cluster, self.rc, self._create_cluster_config(member_address) + ) + member = await asyncio.to_thread(cluster.start_member) disconnected = asyncio.Event() reconnected = asyncio.Event() @@ -225,13 +226,13 @@ def listener(state): await self.assertTrueEventually( lambda: self.assertEqual(1, len(client._connection_manager.active_connections)) ) - member.shutdown() + await asyncio.to_thread(member.shutdown) await self.assertTrueEventually(lambda: self.assertTrue(disconnected.is_set())) - cluster.start_member() + await asyncio.to_thread(cluster.start_member) await self.assertTrueEventually(lambda: self.assertTrue(reconnected.is_set())) self.assertEqual(1, len(client._connection_manager.active_connections)) await client.shutdown() - self.rc.terminateCluster(cluster.id) + await asyncio.to_thread(self.rc.terminateCluster, cluster.id) async def _verify_listeners_after_client_disconnected(self, member_address, client_address): heartbeat_seconds = 2 From f61ec8ed4751b2f70bd1ac53d0550a34b695c9dd Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 26 Nov 2025 10:34:07 +0300 Subject: [PATCH 50/51] Test updates --- tests/integration/asyncio/proxy/vector_collection_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/asyncio/proxy/vector_collection_test.py b/tests/integration/asyncio/proxy/vector_collection_test.py index 5523821b9e..d6db835782 100644 --- a/tests/integration/asyncio/proxy/vector_collection_test.py +++ b/tests/integration/asyncio/proxy/vector_collection_test.py @@ -285,7 +285,7 @@ async def test_merge_policy_can_be_sent(self): merge_batch_size=1000, ) # validation happens when the collection proxy is created - self.client.get_vector_collection(name) + await self.client.get_vector_collection(name) async def test_wrong_merge_policy_fails(self): skip_if_client_version_older_than(self, "6.0") From 1ca7fd6a0e79f8c0db1a78a081a1c55745e1940e Mon Sep 17 00:00:00 2001 From: Yuce Tekol Date: Wed, 26 Nov 2025 15:27:42 +0300 Subject: [PATCH 51/51] Addressed review comment --- hazelcast/internal/asyncio_connection.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hazelcast/internal/asyncio_connection.py b/hazelcast/internal/asyncio_connection.py index ed2837409c..f747a53110 100644 --- a/hazelcast/internal/asyncio_connection.py +++ b/hazelcast/internal/asyncio_connection.py @@ -340,7 +340,6 @@ async def on_connection_close(self, closed_connection): if removed: async with asyncio.TaskGroup() as tg: - # TODO: see on_connection_open for _, on_connection_closed in self._connection_listeners: if on_connection_closed: try: @@ -472,7 +471,6 @@ async def run(): connecting_uuids.add(member_uuid) if not self._lifecycle_service.running: break - # TODO: ERROR:asyncio:Task was destroyed but it is pending! 
tg.create_task(self._get_or_connect_to_member(member)) member_uuids.append(member_uuid) @@ -705,8 +703,6 @@ async def _handle_successful_auth(self, response, connection): for on_connection_opened, _ in self._connection_listeners: if on_connection_opened: try: - # TODO: creating the task may not throw the exception - # TODO: protect the loop against exceptions, so all handlers run maybe_coro = on_connection_opened(connection) if isinstance(maybe_coro, Coroutine): tg.create_task(maybe_coro)
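A note on the recurring pattern in the test updates above (PATCH 46 and PATCH 49): the remote-controller and member-lifecycle calls (startMember, shutdown, terminateCluster) are blocking, so the tests wrap them in asyncio.to_thread to keep the event loop free for the client's own heartbeats and invocations. A minimal, self-contained sketch of that pattern follows; start_member_blocking is a hypothetical stand-in for the real remote-controller call.

import asyncio
import time


def start_member_blocking() -> str:
    # Hypothetical stand-in for a blocking call such as cluster.start_member();
    # it must not run on the event loop thread.
    time.sleep(0.5)
    return "member-uuid"


async def main() -> None:
    # asyncio.to_thread runs the blocking call in a worker thread and returns
    # an awaitable, so the loop keeps servicing heartbeats and invocations.
    member_uuid = await asyncio.to_thread(start_member_blocking)
    print("started", member_uuid)


asyncio.run(main())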
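The Proxy.destroy change in PATCH 47 (tidied in PATCH 48) applies structured concurrency to cleanup: the local _on_destroy hook and the remote destroy_proxy call run as sibling tasks in an asyncio.TaskGroup, which requires Python 3.11+ and is presumably why PATCH 48 adds the type: ignore for older type-checking targets. A rough standalone sketch of that shape, with both coroutines below as hypothetical stand-ins for the real hooks:

import asyncio


async def on_destroy() -> None:
    # Hypothetical stand-in for the proxy's local cleanup hook.
    await asyncio.sleep(0)


async def destroy_proxy_remotely() -> bool:
    # Hypothetical stand-in for the proxy manager's server-side destroy call.
    await asyncio.sleep(0)
    return True


async def destroy() -> bool:
    # The TaskGroup waits for both tasks on exit; an exception in either one
    # cancels the other and is re-raised as an ExceptionGroup.
    async with asyncio.TaskGroup() as tg:
        tg.create_task(on_destroy())
        destroyed = tg.create_task(destroy_proxy_remotely())
    return destroyed.result()


print(asyncio.run(destroy()))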