From a7eb1682f8903a0d70cf28b5547dbc763c48aef3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Sun, 28 Sep 2025 20:32:54 +0800
Subject: [PATCH 01/32] feat: define mem-read scheduler message&consumer; add
 async mem-reader mode in core;

---
 src/memos/mem_os/core.py                     | 14 +++++++
 src/memos/mem_reader/base.py                 |  2 +-
 src/memos/mem_reader/simple_struct.py        |  4 +-
 src/memos/mem_scheduler/general_scheduler.py |  5 +++
 .../mem_scheduler/schemas/general_schemas.py |  1 +
 src/memos/memories/textual/base.py           |  3 ++
 src/memos/memories/textual/general.py        |  2 +
 src/memos/memories/textual/naive.py          |  2 +
 src/memos/memories/textual/tree.py           | 30 +++----------
 .../tree_text_memory/organize/manager.py     | 42 ++++++++-----------
 10 files changed, 54 insertions(+), 51 deletions(-)

diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py
index 54e507b50..0055e8953 100644
--- a/src/memos/mem_os/core.py
+++ b/src/memos/mem_os/core.py
@@ -17,6 +17,7 @@
 from memos.mem_scheduler.schemas.general_schemas import (
     ADD_LABEL,
     ANSWER_LABEL,
+    MEM_READ_LABEL,
     QUERY_LABEL,
 )
 from memos.mem_scheduler.schemas.message_schemas import ScheduleMessageItem
@@ -695,6 +696,7 @@ def add(
                 logger.info(
                     f"time add: messages is not None and enable_textual_memory and text_mem is not None time user_id: {target_user_id} time is: {time.time() - time_start_1}"
                 )
+                sync_mode = self.mem_cubes[mem_cube_id].text_mem.mode
                 if self.mem_cubes[mem_cube_id].config.text_mem.backend != "tree_text":
                     add_memory = []
                     metadata = TextualMemoryMetadata(
@@ -712,6 +714,7 @@ def add(
                     messages_list,
                     type="chat",
                     info={"user_id": target_user_id, "session_id": target_session_id},
+                    mode="fast" if sync_mode == "async" else "fine",
                 )
                 logger.info(
                     f"time add: get mem_reader time user_id: {target_user_id} time is: {time.time() - time_start_2}"
                 )
@@ -724,6 +727,17 @@ def add(
                     f"Added memory user {target_user_id} to memcube {mem_cube_id}: {mem_id_list}"
                 )

+                if sync_mode == "async" and self.mem_scheduler is not None:
+                    message_item = ScheduleMessageItem(
+                        user_id=target_user_id,
+                        mem_cube_id=mem_cube_id,
+                        mem_cube=self.mem_cubes[mem_cube_id],
+                        label=MEM_READ_LABEL,
+                        content={json.dumps(mem_ids)},
+                        timestamp=datetime.utcnow(),
+                    )
+                    self.mem_scheduler.submit_messages(messages=[message_item])
+
             # submit messages for scheduler
             if self.enable_mem_scheduler and self.mem_scheduler is not None:
                 mem_cube = self.mem_cubes[mem_cube_id]
diff --git a/src/memos/mem_reader/base.py b/src/memos/mem_reader/base.py
index f092c3870..ba8be8652 100644
--- a/src/memos/mem_reader/base.py
+++ b/src/memos/mem_reader/base.py
@@ -18,7 +18,7 @@ def get_scene_data_info(self, scene_data: list, type: str) -> list[str]:

     @abstractmethod
     def get_memory(
-        self, scene_data: list, type: str, info: dict[str, Any]
+        self, scene_data: list, type: str, info: dict[str, Any], mode: str = "fast"
     ) -> list[list[TextualMemoryItem]]:
         """Various types of memories extracted from scene_data"""

diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py
index b439cb2b2..2d20453ab 100644
--- a/src/memos/mem_reader/simple_struct.py
+++ b/src/memos/mem_reader/simple_struct.py
@@ -202,7 +202,7 @@ def _process_chat_data(self, scene_data_info, info):
         return chat_read_nodes

     def get_memory(
-        self, scene_data: list, type: str, info: dict[str, Any]
+        self, scene_data: list, type: str, info: dict[str, Any], mode: str = "fast"
     ) -> list[list[TextualMemoryItem]]:
         """
         Extract and classify memory content from scene_data.
@@ -219,6 +219,8 @@ def get_memory(
                 - topic_chunk_overlap: Overlap for large topic chunks (default: 100)
                 - chunk_size: Size for small chunks (default: 256)
                 - chunk_overlap: Overlap for small chunks (default: 50)
+            mode: mem-reader mode; "fast" does quick processing, while "fine"
+                builds a deeper understanding by calling the LLM
         Returns:
             list[list[TextualMemoryItem]] containing memory content with summaries as keys and original text as values
         Raises:
diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py
index 340400abf..c85bdb756 100644
--- a/src/memos/mem_scheduler/general_scheduler.py
+++ b/src/memos/mem_scheduler/general_scheduler.py
@@ -8,6 +8,7 @@
     ADD_LABEL,
     ANSWER_LABEL,
     DEFAULT_MAX_QUERY_KEY_WORDS,
+    MEM_READ_LABEL,
     QUERY_LABEL,
     WORKING_MEMORY_TYPE,
     MemCubeID,
@@ -34,6 +35,7 @@ def __init__(self, config: GeneralSchedulerConfig):
             QUERY_LABEL: self._query_message_consumer,
             ANSWER_LABEL: self._answer_message_consumer,
             ADD_LABEL: self._add_message_consumer,
+            MEM_READ_LABEL: self._mem_read_message_consumer,
         }
         self.dispatcher.register_handlers(handlers)

@@ -216,6 +218,9 @@ def _add_message_consumer(self, messages: list[ScheduleMessageItem]) -> None:
         except Exception as e:
             logger.error(f"Error: {e}", exc_info=True)

+    def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> None:
+        logger.info(f"Messages {messages} assigned to {ADD_LABEL} handler.")
+
     def process_session_turn(
         self,
         queries: str | list[str],
diff --git a/src/memos/mem_scheduler/schemas/general_schemas.py b/src/memos/mem_scheduler/schemas/general_schemas.py
index a81caf5a8..ac24cd2ee 100644
--- a/src/memos/mem_scheduler/schemas/general_schemas.py
+++ b/src/memos/mem_scheduler/schemas/general_schemas.py
@@ -8,6 +8,7 @@
 QUERY_LABEL = "query"
 ANSWER_LABEL = "answer"
 ADD_LABEL = "add"
+MEM_READ_LABEL = "mem_read"
 TreeTextMemory_SEARCH_METHOD = "tree_text_memory_search"
 TreeTextMemory_FINE_SEARCH_METHOD = "tree_text_memory_fine_search"

diff --git a/src/memos/memories/textual/base.py b/src/memos/memories/textual/base.py
index 8171fadce..26efb1cb3 100644
--- a/src/memos/memories/textual/base.py
+++ b/src/memos/memories/textual/base.py
@@ -10,6 +10,9 @@
 class BaseTextMemory(BaseMemory):
     """Base class for all textual memory implementations."""

+    # Default mode configuration - can be overridden by subclasses
+    mode: str = "sync"  # Default mode: 'async' or 'sync'
+
     @abstractmethod
     def __init__(self, config: BaseTextMemoryConfig):
         """Initialize memory with the given configuration."""
diff --git a/src/memos/memories/textual/general.py b/src/memos/memories/textual/general.py
index 9793224b5..d71a86d2e 100644
--- a/src/memos/memories/textual/general.py
+++ b/src/memos/memories/textual/general.py
@@ -26,6 +26,8 @@ class GeneralTextMemory(BaseTextMemory):

     def __init__(self, config: GeneralTextMemoryConfig):
         """Initialize memory with the given configuration."""
+        # Set mode from class default or override if needed
+        self.mode = getattr(self.__class__, "mode", "sync")
         self.config: GeneralTextMemoryConfig = config
         self.extractor_llm: OpenAILLM | OllamaLLM | AzureLLM = LLMFactory.from_config(
             config.extractor_llm
         )
diff --git a/src/memos/memories/textual/naive.py b/src/memos/memories/textual/naive.py
index f8684729a..7bc49e767 100644
--- a/src/memos/memories/textual/naive.py
+++ b/src/memos/memories/textual/naive.py
@@ -61,6 +61,8 @@ class NaiveTextMemory(BaseTextMemory):

     def __init__(self, config: NaiveTextMemoryConfig):
         """Initialize memory with the given configuration."""
+        # Set mode from
class default or override if needed + self.mode = getattr(self.__class__, "mode", "sync") self.config = config self.extractor_llm = LLMFactory.from_config(config.extractor_llm) self.memories = [] diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index f324f41c9..7196738d8 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -2,7 +2,6 @@ import os import shutil import tempfile -import time from datetime import datetime from pathlib import Path @@ -31,30 +30,22 @@ class TreeTextMemory(BaseTextMemory): """General textual memory implementation for storing and retrieving memories.""" + # Override the default mode to async for TreeTextMemory + mode: str = "async" + def __init__(self, config: TreeTextMemoryConfig): """Initialize memory with the given configuration.""" - time_start = time.time() + # Set mode from class default or override if needed + self.mode = getattr(self.__class__, "mode", "async") self.config: TreeTextMemoryConfig = config self.extractor_llm: OpenAILLM | OllamaLLM | AzureLLM = LLMFactory.from_config( config.extractor_llm ) - logger.info(f"time init: extractor_llm time is: {time.time() - time_start}") - - time_start_ex = time.time() self.dispatcher_llm: OpenAILLM | OllamaLLM | AzureLLM = LLMFactory.from_config( config.dispatcher_llm ) - logger.info(f"time init: dispatcher_llm time is: {time.time() - time_start_ex}") - - time_start_em = time.time() self.embedder: OllamaEmbedder = EmbedderFactory.from_config(config.embedder) - logger.info(f"time init: embedder time is: {time.time() - time_start_em}") - - time_start_gs = time.time() self.graph_store: Neo4jGraphDB = GraphStoreFactory.from_config(config.graph_db) - logger.info(f"time init: graph_store time is: {time.time() - time_start_gs}") - - time_start_rr = time.time() if config.reranker is None: default_cfg = RerankerConfigFactory.model_validate( { @@ -68,10 +59,7 @@ def __init__(self, config: TreeTextMemoryConfig): self.reranker = RerankerFactory.from_config(default_cfg) else: self.reranker = RerankerFactory.from_config(config.reranker) - logger.info(f"time init: reranker time is: {time.time() - time_start_rr}") self.is_reorganize = config.reorganize - - time_start_mm = time.time() self.memory_manager: MemoryManager = MemoryManager( self.graph_store, self.embedder, @@ -84,8 +72,6 @@ def __init__(self, config: TreeTextMemoryConfig): }, is_reorganize=self.is_reorganize, ) - logger.info(f"time init: memory_manager time is: {time.time() - time_start_mm}") - time_start_ir = time.time() # Create internet retriever if configured self.internet_retriever = None if config.internet_retriever is not None: @@ -97,17 +83,11 @@ def __init__(self, config: TreeTextMemoryConfig): ) else: logger.info("No internet retriever configured") - logger.info(f"time init: internet_retriever time is: {time.time() - time_start_ir}") def add(self, memories: list[TextualMemoryItem | dict[str, Any]]) -> list[str]: """Add memories. Args: memories: List of TextualMemoryItem objects or dictionaries to add. 
- Later: - memory_items = [TextualMemoryItem(**m) if isinstance(m, dict) else m for m in memories] - metadata = extract_metadata(memory_items, self.extractor_llm) - plan = plan_memory_operations(memory_items, metadata, self.graph_store) - execute_plan(memory_items, metadata, plan, self.graph_store) """ return self.memory_manager.add(memories) diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index 5cc714806..0e86fe41f 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -1,3 +1,4 @@ +import asyncio import traceback import uuid @@ -54,7 +55,7 @@ def __init__( def add(self, memories: list[TextualMemoryItem]) -> list[str]: """ - Add new memories in parallel to different memory types (WorkingMemory, LongTermMemory, UserMemory). + Add new memories in parallel to different memory types. """ added_ids: list[str] = [] @@ -66,29 +67,6 @@ def add(self, memories: list[TextualMemoryItem]) -> list[str]: added_ids.extend(ids) except Exception as e: logger.exception("Memory processing error: ", exc_info=e) - - try: - self.graph_store.remove_oldest_memory( - memory_type="WorkingMemory", keep_latest=self.memory_size["WorkingMemory"] - ) - except Exception: - logger.warning(f"Remove WorkingMemory error: {traceback.format_exc()}") - - try: - self.graph_store.remove_oldest_memory( - memory_type="LongTermMemory", keep_latest=self.memory_size["LongTermMemory"] - ) - except Exception: - logger.warning(f"Remove LongTermMemory error: {traceback.format_exc()}") - - try: - self.graph_store.remove_oldest_memory( - memory_type="UserMemory", keep_latest=self.memory_size["UserMemory"] - ) - except Exception: - logger.warning(f"Remove UserMemory error: {traceback.format_exc()}") - - self._refresh_memory_size() return added_ids def replace_working_memory(self, memories: list[TextualMemoryItem]) -> None: @@ -266,6 +244,22 @@ def _ensure_structure_path( # Step 3: Return this structure node ID as the parent_id return node_id + async def _remove_and_refresh_memory(self): + remove_tasks = [ + self._remove_oldest_memory_async("WorkingMemory", self.memory_size["WorkingMemory"]), + self._remove_oldest_memory_async("LongTermMemory", self.memory_size["LongTermMemory"]), + self._remove_oldest_memory_async("UserMemory", self.memory_size["UserMemory"]), + ] + await asyncio.gather(*remove_tasks) + await asyncio.to_thread(self._refresh_memory_size) + print("finished remove and refresh memory") + + async def _remove_oldest_memory_async(self, memory_type: str, memory_size: int): + try: + await asyncio.to_thread(self.graph_store.remove_oldest_memory, memory_type, memory_size) + except Exception: + logger.warning(f"Remove {memory_type} error: {traceback.format_exc()}") + def wait_reorganizer(self): """ Wait for the reorganizer to finish processing all messages. 
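For orientation before the next patch: the flow PATCH 01 introduces is easiest to see end to end. Below is a minimal, self-contained sketch of that flow; `ScheduleMessageItem` and the toy scheduler here are simplified stand-ins for the memos classes in the diff above (the real signatures have more fields), so treat the shape of the flow, not the exact API, as the point: `add()` stores quickly in fast mode, then defers the fine pass by enqueuing a `mem_read` message for the scheduler.

```python
import json
from dataclasses import dataclass
from datetime import datetime, timezone

MEM_READ_LABEL = "mem_read"  # same label value the patch adds to general_schemas.py


@dataclass
class ScheduleMessageItem:
    # Simplified stand-in for memos' message schema.
    user_id: str
    mem_cube_id: str
    label: str
    content: str
    timestamp: datetime


class ToyScheduler:
    """Toy dispatcher: routes submitted messages to a handler by label."""

    def __init__(self):
        self.handlers = {MEM_READ_LABEL: self._mem_read_consumer}

    def submit_messages(self, messages):
        for message in messages:
            self.handlers[message.label](message)

    def _mem_read_consumer(self, message):
        mem_ids = json.loads(message.content)  # ids of the fast-mode memories
        print(f"fine-grained re-read queued for {message.user_id}: {mem_ids}")


def add_async(scheduler, user_id, mem_cube_id, mem_ids):
    """Fast path has already stored mem_ids; defer the fine pass via the scheduler."""
    scheduler.submit_messages([
        ScheduleMessageItem(
            user_id=user_id,
            mem_cube_id=mem_cube_id,
            label=MEM_READ_LABEL,
            content=json.dumps(mem_ids),  # serialize ids as a plain JSON string
            timestamp=datetime.now(timezone.utc),
        )
    ])


add_async(ToyScheduler(), "user-1", "cube-1", ["mem-a", "mem-b"])
```

The design choice this illustrates: the caller never blocks on LLM-based extraction; only the cheap fast-mode write happens inline, and everything expensive rides the message queue.
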
From 8a24ec741e8a4c21850a4800fe3dc39585f3862c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Sun, 28 Sep 2025 21:10:44 +0800 Subject: [PATCH 02/32] feat: add fast/fine mode in mem-reader; --- src/memos/mem_reader/simple_struct.py | 10 ++++-- src/memos/mem_scheduler/general_scheduler.py | 36 +++++++++++++++++++- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index 2d20453ab..a0e4c061e 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -128,7 +128,10 @@ def __init__(self, config: SimpleStructMemReaderConfig): self.chunker = ChunkerFactory.from_config(config.chunker) @timed - def _process_chat_data(self, scene_data_info, info): + def _process_chat_data(self, scene_data_info, info, **kwargs): + mode = kwargs.get("mode", "fine") + if mode == "fast": + raise NotImplementedError mem_list = [] for item in scene_data_info: if "chat_time" in item: @@ -255,7 +258,7 @@ def get_memory( # Process Q&A pairs concurrently with context propagation with ContextThreadPoolExecutor() as executor: futures = [ - executor.submit(processing_func, scene_data_info, info) + executor.submit(processing_func, scene_data_info, info, mode) for scene_data_info in list_scene_data_info ] for future in concurrent.futures.as_completed(futures): @@ -319,6 +322,9 @@ def get_scene_data_info(self, scene_data: list, type: str) -> list[str]: return results def _process_doc_data(self, scene_data_info, info, **kwargs): + mode = kwargs.get("mode", "fine") + if mode == "fast": + raise NotImplementedError chunks = self.chunker.chunk(scene_data_info["text"]) messages = [] for chunk in chunks: diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index c85bdb756..a0056d98c 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -219,7 +219,41 @@ def _add_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: logger.error(f"Error: {e}", exc_info=True) def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: - logger.info(f"Messages {messages} assigned to {ADD_LABEL} handler.") + logger.info(f"Messages {messages} assigned to {MEM_READ_LABEL} handler.") + + for message in messages: + try: + user_id = message.user_id + mem_cube_id = message.mem_cube_id + mem_cube = message.mem_cube + content = message.content + + # Parse the memory IDs from content + mem_ids = json.loads(content) if isinstance(content, str) else content + + logger.info( + f"Processing mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}, mem_ids={mem_ids}" + ) + + # Get the text memory from the mem_cube + text_mem = mem_cube.text_mem + if not isinstance(text_mem, TreeTextMemory): + logger.error(f"Expected TreeTextMemory but got {type(text_mem).__name__}") + continue + + # Process the memory reading/retrieval logic here + # This could include: + # 1. Triggering memory reorganization + # 2. Updating memory relationships + # 3. 
Performing additional memory processing + + # For now, just log the successful processing + logger.info( + f"Successfully processed mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}" + ) + + except Exception as e: + logger.error(f"Error processing mem_read message: {e}", exc_info=True) def process_session_turn( self, From 81915a33f1899050be3b0e0a104dd4057d8a22bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Mon, 29 Sep 2025 11:55:16 +0800 Subject: [PATCH 03/32] feat: add mem-reader in scheduler --- src/memos/mem_os/core.py | 3 +- src/memos/mem_scheduler/base_scheduler.py | 1 + src/memos/mem_scheduler/general_scheduler.py | 138 ++++++++++++++++++- 3 files changed, 135 insertions(+), 7 deletions(-) diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index 0055e8953..261a18d12 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -71,6 +71,7 @@ def __init__(self, config: MOSConfig, user_manager: UserManager | None = None): if self.enable_mem_scheduler: self._mem_scheduler = self._initialize_mem_scheduler() self._mem_scheduler.mem_cubes = self.mem_cubes + self._mem_scheduler.mem_reader = self.mem_reader else: self._mem_scheduler: GeneralScheduler = None @@ -733,7 +734,7 @@ def add( mem_cube_id=mem_cube_id, mem_cube=self.mem_cubes[mem_cube_id], label=MEM_READ_LABEL, - content={json.dumps(mem_ids)}, + content=json.dumps(mem_ids), timestamp=datetime.utcnow(), ) self.mem_scheduler.submit_messages(messages=[message_item]) diff --git a/src/memos/mem_scheduler/base_scheduler.py b/src/memos/mem_scheduler/base_scheduler.py index b6ef00d8d..0a8c43d78 100644 --- a/src/memos/mem_scheduler/base_scheduler.py +++ b/src/memos/mem_scheduler/base_scheduler.py @@ -67,6 +67,7 @@ def __init__(self, config: BaseSchedulerConfig): self.db_engine: Engine | None = None self.monitor: SchedulerGeneralMonitor | None = None self.dispatcher_monitor: SchedulerDispatcherMonitor | None = None + self.mem_reader = None # Will be set by MOSCore self.dispatcher = SchedulerDispatcher( max_workers=self.thread_pool_max_workers, enable_parallel_dispatch=self.enable_parallel_dispatch, diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index a0056d98c..2058f1c87 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -241,13 +241,15 @@ def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> Non logger.error(f"Expected TreeTextMemory but got {type(text_mem).__name__}") continue - # Process the memory reading/retrieval logic here - # This could include: - # 1. Triggering memory reorganization - # 2. Updating memory relationships - # 3. Performing additional memory processing + # Use mem_reader to process the memories + self._process_memories_with_reader( + mem_ids=mem_ids, + user_id=user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + text_mem=text_mem, + ) - # For now, just log the successful processing logger.info( f"Successfully processed mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}" ) @@ -255,6 +257,130 @@ def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> Non except Exception as e: logger.error(f"Error processing mem_read message: {e}", exc_info=True) + def _process_memories_with_reader( + self, + mem_ids: list[str], + user_id: str, + mem_cube_id: str, + mem_cube: GeneralMemCube, + text_mem: TreeTextMemory, + ) -> None: + """ + Process memories using mem_reader for enhanced memory processing. 
+ + Args: + mem_ids: List of memory IDs to process + user_id: User ID + mem_cube_id: Memory cube ID + mem_cube: Memory cube instance + text_mem: Text memory instance + """ + try: + # Get the mem_reader from the parent MOSCore + if not hasattr(self, "mem_reader") or self.mem_reader is None: + logger.warning( + "mem_reader not available in scheduler, skipping enhanced processing" + ) + return + + # Get the original memory items + memory_items = [] + for mem_id in mem_ids: + try: + memory_item = text_mem.get(mem_id) + memory_items.append(memory_item) + except Exception as e: + logger.warning(f"Failed to get memory {mem_id}: {e}") + continue + + if not memory_items: + logger.warning("No valid memory items found for processing") + return + + # Prepare scene data for mem_reader + scene_data = [] + for memory_item in memory_items: + scene_data.append( + { + "role": "user", # or determine from metadata + "content": memory_item.memory, + "chat_time": memory_item.metadata.updated_at + or memory_item.metadata.created_at, + } + ) + + # Use mem_reader to process the memories + logger.info(f"Processing {len(scene_data)} memories with mem_reader") + + # Extract memories using mem_reader + processed_memories = self.mem_reader.get_memory( + scene_data=scene_data, + type="chat", + info={"user_id": user_id, "session_id": "", "mem_cube_id": mem_cube_id}, + mode="fast", # Use fast mode for async processing + ) + + if processed_memories and len(processed_memories) > 0: + # Flatten the results (mem_reader returns list of lists) + flattened_memories = [] + for memory_list in processed_memories: + flattened_memories.extend(memory_list) + + logger.info(f"mem_reader processed {len(flattened_memories)} enhanced memories") + + # Add the enhanced memories back to the memory system + if flattened_memories: + enhanced_mem_ids = text_mem.add(flattened_memories) + logger.info( + f"Added {len(enhanced_mem_ids)} enhanced memories: {enhanced_mem_ids}" + ) + + # Trigger memory reorganization if needed + self._trigger_memory_reorganization( + text_mem=text_mem, user_id=user_id, mem_cube_id=mem_cube_id + ) + else: + logger.info("No enhanced memories generated by mem_reader") + else: + logger.info("mem_reader returned no processed memories") + + except Exception as e: + logger.error(f"Error in _process_memories_with_reader: {e}", exc_info=True) + + def _trigger_memory_reorganization( + self, + text_mem: TreeTextMemory, + user_id: str, + mem_cube_id: str, + ) -> None: + """ + Trigger memory reorganization after enhanced processing. 
+ + Args: + text_mem: Text memory instance + user_id: User ID + mem_cube_id: Memory cube ID + """ + try: + # Check if reorganization is enabled + if hasattr(text_mem, "is_reorganize") and text_mem.is_reorganize: + logger.info( + f"Triggering memory reorganization for user_id={user_id}, mem_cube_id={mem_cube_id}" + ) + + # Get current working memory size + current_sizes = text_mem.get_current_memory_size() + logger.info(f"Current memory sizes: {current_sizes}") + + # The reorganization will be handled by the memory manager + # This is just a trigger point for logging and monitoring + logger.info("Memory reorganization triggered successfully") + else: + logger.info("Memory reorganization is disabled, skipping reorganization trigger") + + except Exception as e: + logger.error(f"Error in _trigger_memory_reorganization: {e}", exc_info=True) + def process_session_turn( self, queries: str | list[str], From b5086e77c8a4a1c12aae3d94a321d19cf06a5856 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Mon, 13 Oct 2025 20:58:18 +0800 Subject: [PATCH 04/32] feat: change async remove --- .../tree_text_memory/organize/manager.py | 58 +++++++------------ 1 file changed, 21 insertions(+), 37 deletions(-) diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index 6be610761..53b01fd78 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -67,33 +67,8 @@ def add(self, memories: list[TextualMemoryItem]) -> list[str]: added_ids.extend(ids) except Exception as e: logger.exception("Memory processing error: ", exc_info=e) - - # Only clean up if we're close to or over the limit - self._cleanup_memories_if_needed() - self._refresh_memory_size() return added_ids - def _cleanup_memories_if_needed(self) -> None: - """ - Only clean up memories if we're close to or over the limit. - This reduces unnecessary database operations. 
- """ - cleanup_threshold = 0.8 # Clean up when 80% full - - for memory_type, limit in self.memory_size.items(): - current_count = self.current_memory_size.get(memory_type, 0) - threshold = int(limit * cleanup_threshold) - - # Only clean up if we're at or above the threshold - if current_count >= threshold: - try: - self.graph_store.remove_oldest_memory( - memory_type=memory_type, keep_latest=limit - ) - logger.debug(f"Cleaned up {memory_type}: {current_count} -> {limit}") - except Exception: - logger.warning(f"Remove {memory_type} error: {traceback.format_exc()}") - def replace_working_memory(self, memories: list[TextualMemoryItem]) -> None: """ Replace WorkingMemory @@ -270,20 +245,29 @@ def _ensure_structure_path( return node_id async def _remove_and_refresh_memory(self): - remove_tasks = [ - self._remove_oldest_memory_async("WorkingMemory", self.memory_size["WorkingMemory"]), - self._remove_oldest_memory_async("LongTermMemory", self.memory_size["LongTermMemory"]), - self._remove_oldest_memory_async("UserMemory", self.memory_size["UserMemory"]), - ] - await asyncio.gather(*remove_tasks) + await asyncio.to_thread(self._cleanup_memories_if_needed) await asyncio.to_thread(self._refresh_memory_size) - print("finished remove and refresh memory") - async def _remove_oldest_memory_async(self, memory_type: str, memory_size: int): - try: - await asyncio.to_thread(self.graph_store.remove_oldest_memory, memory_type, memory_size) - except Exception: - logger.warning(f"Remove {memory_type} error: {traceback.format_exc()}") + def _cleanup_memories_if_needed(self) -> None: + """ + Only clean up memories if we're close to or over the limit. + This reduces unnecessary database operations. + """ + cleanup_threshold = 0.8 # Clean up when 80% full + + for memory_type, limit in self.memory_size.items(): + current_count = self.current_memory_size.get(memory_type, 0) + threshold = int(limit * cleanup_threshold) + + # Only clean up if we're at or above the threshold + if current_count >= threshold: + try: + self.graph_store.remove_oldest_memory( + memory_type=memory_type, keep_latest=limit + ) + logger.debug(f"Cleaned up {memory_type}: {current_count} -> {limit}") + except Exception: + logger.warning(f"Remove {memory_type} error: {traceback.format_exc()}") def wait_reorganizer(self): """ From 0b2649d7934d325ffc0660064e86074969b18847 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 14 Oct 2025 14:39:02 +0800 Subject: [PATCH 05/32] feat: modify async-add in core.py --- src/memos/mem_os/core.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index 261a18d12..b536ec5b2 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -698,6 +698,11 @@ def add( f"time add: messages is not None and enable_textual_memory and text_mem is not None time user_id: {target_user_id} time is: {time.time() - time_start_1}" ) sync_mode = self.mem_cubes[mem_cube_id].text_mem.mode + if sync_mode == "async": + assert self.mem_scheduler is not None, ( + "Mem-Scheduler must be working when use synchronous memory adding." 
+ ) + if self.mem_cubes[mem_cube_id].config.text_mem.backend != "tree_text": add_memory = [] metadata = TextualMemoryMetadata( @@ -728,20 +733,20 @@ def add( f"Added memory user {target_user_id} to memcube {mem_cube_id}: {mem_id_list}" ) - if sync_mode == "async" and self.mem_scheduler is not None: - message_item = ScheduleMessageItem( - user_id=target_user_id, - mem_cube_id=mem_cube_id, - mem_cube=self.mem_cubes[mem_cube_id], - label=MEM_READ_LABEL, - content=json.dumps(mem_ids), - timestamp=datetime.utcnow(), - ) - self.mem_scheduler.submit_messages(messages=[message_item]) - # submit messages for scheduler if self.enable_mem_scheduler and self.mem_scheduler is not None: mem_cube = self.mem_cubes[mem_cube_id] + if sync_mode == "async": + message_item = ScheduleMessageItem( + user_id=target_user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + label=MEM_READ_LABEL, + content=json.dumps(mem_ids), + timestamp=datetime.utcnow(), + ) + self.mem_scheduler.submit_messages(messages=[message_item]) + message_item = ScheduleMessageItem( user_id=target_user_id, mem_cube_id=mem_cube_id, From 9dd632f0aadf237a331ee1400d2d6ea4f6be2dd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 14 Oct 2025 17:26:05 +0800 Subject: [PATCH 06/32] feat: add 'remove and refresh memory in schedular' --- src/memos/mem_reader/simple_struct.py | 2 +- src/memos/mem_scheduler/general_scheduler.py | 7 ++++++- .../memories/textual/tree_text_memory/organize/manager.py | 7 +++---- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index a0e4c061e..3ecc15e80 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -258,7 +258,7 @@ def get_memory( # Process Q&A pairs concurrently with context propagation with ContextThreadPoolExecutor() as executor: futures = [ - executor.submit(processing_func, scene_data_info, info, mode) + executor.submit(processing_func, scene_data_info, info, mode=mode) for scene_data_info in list_scene_data_info ] for future in concurrent.futures.as_completed(futures): diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 2058f1c87..9e64e7311 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -317,7 +317,7 @@ def _process_memories_with_reader( scene_data=scene_data, type="chat", info={"user_id": user_id, "session_id": "", "mem_cube_id": mem_cube_id}, - mode="fast", # Use fast mode for async processing + mode="fine", # Use fast mode for async processing ) if processed_memories and len(processed_memories) > 0: @@ -344,6 +344,11 @@ def _process_memories_with_reader( else: logger.info("mem_reader returned no processed memories") + text_mem.delete(mem_ids) + logger.info("Delete raw mem_ids") + text_mem.memory_manager.remove_and_refresh_memory() + logger.info("Remove and Refresh Memories") + except Exception as e: logger.error(f"Error in _process_memories_with_reader: {e}", exc_info=True) diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index 53b01fd78..1dd4aee5e 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -1,4 +1,3 @@ -import asyncio import traceback import uuid @@ -244,9 +243,9 @@ def _ensure_structure_path( # Step 3: Return this 
         return node_id

-    async def _remove_and_refresh_memory(self):
-        await asyncio.to_thread(self._cleanup_memories_if_needed)
-        await asyncio.to_thread(self._refresh_memory_size)
+    def remove_and_refresh_memory(self):
+        self._cleanup_memories_if_needed()
+        self._refresh_memory_size()

     def _cleanup_memories_if_needed(self) -> None:
         """

From 8c970587f92030e8564033100a9a9f1c6161b975 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Tue, 14 Oct 2025 21:56:33 +0800
Subject: [PATCH 07/32] feat: add naive fast mode in mem-reader

---
 src/memos/chunkers/sentence_chunker.py |   5 +-
 src/memos/mem_reader/simple_struct.py  | 217 ++++++++++++++++++++++---
 2 files changed, 201 insertions(+), 21 deletions(-)

diff --git a/src/memos/chunkers/sentence_chunker.py b/src/memos/chunkers/sentence_chunker.py
index 4de0cf32b..c499a49d2 100644
--- a/src/memos/chunkers/sentence_chunker.py
+++ b/src/memos/chunkers/sentence_chunker.py
@@ -28,8 +28,11 @@ def __init__(self, config: SentenceChunkerConfig):
         )
         logger.info(f"Initialized SentenceChunker with config: {config}")

-    def chunk(self, text: str) -> list[Chunk]:
+    def chunk(self, text: str) -> list[str] | list[Chunk]:
         """Chunk the given text into smaller chunks based on sentences."""
+        if len(text) <= self.config.chunk_size:
+            return [text]
+
         chonkie_chunks = self.chunker.chunk(text)
         chunks = []
diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py
index 3ecc15e80..cc979bb41 100644
--- a/src/memos/mem_reader/simple_struct.py
+++ b/src/memos/mem_reader/simple_struct.py
@@ -112,6 +112,14 @@ def _build_node(idx, message, info, scene_file, llm, parse_json_result, embedder
         return None


+def _derive_key(text: str, max_len: int = 80) -> str:
+    """Default key when no LLM is used: the first sentence, truncated to max_len characters."""
+    if not text:
+        return ""
+    sent = re.split(r"[。!?!?]\s*|\n", text.strip())[0]
+    return (sent[:max_len]).strip()
+
+
 class SimpleStructMemReader(BaseMemReader, ABC):
     """Naive implementation of MemReader."""

@@ -127,11 +135,186 @@ def __init__(self, config: SimpleStructMemReaderConfig):
         self.embedder = EmbedderFactory.from_config(config.embedder)
         self.chunker = ChunkerFactory.from_config(config.chunker)

+    def _make_memory_item(
+        self,
+        value: str,
+        info: dict,
+        memory_type: str,
+        tags: list[str] | None = None,
+        key: str | None = None,
+        sources: list | None = None,
+        background: str = "",
+        type_: str = "fact",
+        confidence: float = 0.99,
+    ) -> TextualMemoryItem:
+        """Construct a memory item."""
+        return TextualMemoryItem(
+            memory=value,
+            metadata=TreeNodeTextualMemoryMetadata(
+                user_id=info.get("user_id", ""),
+                session_id=info.get("session_id", ""),
+                memory_type=memory_type,
+                status="activated",
+                tags=tags or [],
+                key=key if key is not None else _derive_key(value),
+                embedding=self.embedder.embed([value])[0],
+                usage=[],
+                sources=sources or [],
+                background=background,
+                confidence=confidence,
+                type=type_,
+            ),
+        )
+
     @timed
     def _process_chat_data(self, scene_data_info, info, **kwargs):
         mode = kwargs.get("mode", "fine")
         if mode == "fast":
-            raise NotImplementedError
+            # Use merge logic to handle short messages
+            raw_content_list = []
+            current_content = ""
+            current_roles = set()
+            current_sources = []
+            current_idx = 0
+
+            for idx, item in enumerate(scene_data_info):
+                try:
+                    role = item.get("role", "user")
+                    content = item.get("content", "")
+                    chat_time = item.get("chat_time", None)
+
+                    prefix = f"{role}: " + (f"[{chat_time}]: " if chat_time else "")
+                    mem = f"{prefix}{content}\n"
+
+                    if len(mem) > 2000:
+ if current_content: + raw_content_list.append( + { + "text": current_content, + "roles": current_roles, + "sources": current_sources, + "start_idx": current_idx, + } + ) + current_content = "" + current_roles = set() + current_sources = [] + + try: + chunks = self.chunker.chunk(content) or [] + except Exception as e: + logger.warning(f"[ChatFast] chunker failed on item {idx}: {e}") + chunks = [] + + if not chunks: + chunks = [type("C", (), {"text": content})] + + for chunk in chunks: + chunk_text = f"{prefix}{chunk.text}" + raw_content_list.append( + { + "text": chunk_text, + "roles": {role}, + "sources": [ + { + "type": "chat", + "index": idx, + "role": role, + "chat_time": chat_time, + } + ], + "start_idx": idx, + } + ) + else: + if len(current_content + mem) > 2000: + if current_content: + raw_content_list.append( + { + "text": current_content, + "roles": current_roles, + "sources": current_sources, + "start_idx": current_idx, + } + ) + current_content = mem + current_roles = {role} + current_sources = [ + { + "type": "chat", + "index": idx, + "role": role, + "chat_time": chat_time, + } + ] + current_idx = idx + else: + current_content += mem + current_roles.add(role) + current_sources.append( + { + "type": "chat", + "index": idx, + "role": role, + "chat_time": chat_time, + } + ) + + except Exception as e: + logger.error(f"[ChatFast] Error preparing item {idx}: {e}") + + if current_content: + raw_content_list.append( + { + "text": current_content, + "roles": current_roles, + "sources": current_sources, + "start_idx": current_idx, + } + ) + + chat_nodes = [] + + def _process_single_item(item_data): + try: + text = item_data["text"] + roles = item_data["roles"] + sources = item_data["sources"] + + mem_type = "UserMemory" if (roles and roles == {"user"}) else "LongTermMemory" + tags = ["mode:fast", f"lang:{detect_lang(text)}"] + [ + f"role:{r}" for r in sorted(roles) + ] + + node = self._make_memory_item( + value=text, + info=info, + memory_type=mem_type, + tags=tags, + key=None, + sources=sources, + background="", + type_="fact", + confidence=0.99, + ) + return node + except Exception as e: + logger.error(f"[ChatFast] Error processing item: {e}") + return None + + with ContextThreadPoolExecutor(max_workers=8) as executor: + futures = [executor.submit(_process_single_item, item) for item in raw_content_list] + + for future in concurrent.futures.as_completed(futures): + try: + node = future.result() + if node: + chat_nodes.append(node) + except Exception as e: + logger.error(f"[ChatFast] Future result error: {e}") + + return chat_nodes + mem_list = [] for item in scene_data_info: if "chat_time" in item: @@ -179,24 +362,18 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): if memory_type not in ["LongTermMemory", "UserMemory"]: memory_type = "LongTermMemory" - node_i = TextualMemoryItem( - memory=memory_i_raw.get("value", ""), - metadata=TreeNodeTextualMemoryMetadata( - user_id=info.get("user_id"), - session_id=info.get("session_id"), - memory_type=memory_type, - status="activated", - tags=memory_i_raw.get("tags", []) - if type(memory_i_raw.get("tags", [])) is list - else [], - key=memory_i_raw.get("key", ""), - embedding=self.embedder.embed([memory_i_raw.get("value", "")])[0], - usage=[], - sources=scene_data_info, - background=response_json.get("summary", ""), - confidence=0.99, - type="fact", - ), + node_i = self._make_memory_item( + value=memory_i_raw.get("value", ""), + info=info, + memory_type=memory_type, + tags=memory_i_raw.get("tags", []) + if 
isinstance(memory_i_raw.get("tags", []), list) + else [], + key=memory_i_raw.get("key", ""), + sources=scene_data_info, + background=response_json.get("summary", ""), + type_="fact", + confidence=0.99, ) chat_read_nodes.append(node_i) except Exception as e: @@ -205,7 +382,7 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): return chat_read_nodes def get_memory( - self, scene_data: list, type: str, info: dict[str, Any], mode: str = "fast" + self, scene_data: list, type: str, info: dict[str, Any], mode: str = "fine" ) -> list[list[TextualMemoryItem]]: """ Extract and classify memory content from scene_data. From 3e08a82540b62f30d0d5fbab108835c0997d0edf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Wed, 15 Oct 2025 15:13:32 +0800 Subject: [PATCH 08/32] feat: finish fast mode in mem-reader --- examples/mem_reader/reader.py | 83 ++++++++++++++++++++------ src/memos/chunkers/sentence_chunker.py | 3 - src/memos/mem_reader/simple_struct.py | 29 +++------ 3 files changed, 72 insertions(+), 43 deletions(-) diff --git a/examples/mem_reader/reader.py b/examples/mem_reader/reader.py index e26d00a67..30e42d497 100644 --- a/examples/mem_reader/reader.py +++ b/examples/mem_reader/reader.py @@ -11,7 +11,7 @@ def main(): ) reader = SimpleStructMemReader(reader_config) - # 3. Define scene data + # 2. Define scene data scene_data = [ [ {"role": "user", "chat_time": "3 May 2025", "content": "I'm feeling a bit down today."}, @@ -187,32 +187,77 @@ def main(): ], ] - # 4. Acquiring memories + print("=== Mem-Reader Fast vs Fine Mode Comparison ===\n") + + # 3. Test Fine Mode (default) + print("🔄 Testing FINE mode (default, with LLM processing)...") start_time = time.time() - chat_memory = reader.get_memory( - scene_data, type="chat", info={"user_id": "user1", "session_id": "session1"} + fine_memory = reader.get_memory( + scene_data, type="chat", info={"user_id": "user1", "session_id": "session1"}, mode="fine" ) - print("\nChat Memory:\n", chat_memory) + fine_time = time.time() - start_time + print(f"✅ Fine mode completed in {fine_time:.2f} seconds") + print(f"📊 Fine mode generated {sum(len(mem_list) for mem_list in fine_memory)} memory items") + + # 4. Test Fast Mode + print("\n⚡ Testing FAST mode (quick processing, no LLM calls)...") + start_time = time.time() + fast_memory = reader.get_memory( + scene_data, type="chat", info={"user_id": "user1", "session_id": "session1"}, mode="fast" + ) + fast_time = time.time() - start_time + print(f"✅ Fast mode completed in {fast_time:.2f} seconds") + print(f"📊 Fast mode generated {sum(len(mem_list) for mem_list in fast_memory)} memory items") + + # 5. Performance Comparison + print("\n📈 Performance Comparison:") + print(f" Fine mode: {fine_time:.2f}s") + print(f" Fast mode: {fast_time:.2f}s") + print(f" Speed improvement: {fine_time / fast_time:.1f}x faster") - # 5. Example of processing documents - print("\n=== Processing Documents ===") + # 6. Show sample results from both modes + print("\n🔍 Sample Results Comparison:") + print("\n--- FINE Mode Results (first 3 items) ---") + for i, mem_list in enumerate(fine_memory[:3]): + for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list + print(f" [{i}][{j}] {mem_item.memory[:100]}...") + + print("\n--- FAST Mode Results (first 3 items) ---") + for i, mem_list in enumerate(fast_memory[:3]): + for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list + print(f" [{i}][{j}] {mem_item.memory[:100]}...") + + # 7. 
Example of processing documents (only in fine mode)
+    print("\n=== Processing Documents (Fine Mode Only) ===")
     # Example document paths (you should replace these with actual document paths)
     doc_paths = [
         "examples/mem_reader/text1.txt",
         "examples/mem_reader/text2.txt",
     ]
-    # 6. Acquiring memories from documents
-    doc_memory = reader.get_memory(
-        doc_paths,
-        "doc",
-        info={
-            "user_id": "1111",
-            "session_id": "2222",
-        },
-    )
-    print("\nDocument Memory:\n", doc_memory)
-    end_time = time.time()
-    print(f"The runtime is {end_time - start_time} seconds.")
+
+    try:
+        # 8. Acquiring memories from documents
+        doc_memory = reader.get_memory(
+            doc_paths,
+            "doc",
+            info={
+                "user_id": "1111",
+                "session_id": "2222",
+            },
+            mode="fine",
+        )
+        print(
+            f"\n📄 Document Memory generated {sum(len(mem_list) for mem_list in doc_memory)} items"
+        )
+    except Exception as e:
+        print(f"⚠️ Document processing failed: {e}")
+        print("   (This is expected if document files don't exist)")
+
+    print("\n🎯 Summary:")
+    print(f"   • Fast mode: {fast_time:.2f}s - Quick processing, no LLM calls")
+    print(f"   • Fine mode: {fine_time:.2f}s - Full LLM processing for better understanding")
+    print("   • Use fast mode for: Real-time applications, high-throughput scenarios")
+    print("   • Use fine mode for: Quality analysis, detailed memory extraction")


 if __name__ == "__main__":
diff --git a/src/memos/chunkers/sentence_chunker.py b/src/memos/chunkers/sentence_chunker.py
index c499a49d2..080962482 100644
--- a/src/memos/chunkers/sentence_chunker.py
+++ b/src/memos/chunkers/sentence_chunker.py
@@ -30,9 +30,6 @@ def __init__(self, config: SentenceChunkerConfig):

     def chunk(self, text: str) -> list[str] | list[Chunk]:
         """Chunk the given text into smaller chunks based on sentences."""
-        if len(text) <= self.config.chunk_size:
-            return [text]
-
         chonkie_chunks = self.chunker.chunk(text)
         chunks = []
diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py
index cc979bb41..fc0e81e52 100644
--- a/src/memos/mem_reader/simple_struct.py
+++ b/src/memos/mem_reader/simple_struct.py
@@ -134,6 +134,7 @@ def __init__(self, config: SimpleStructMemReaderConfig):
         self.llm = LLMFactory.from_config(config.llm)
         self.embedder = EmbedderFactory.from_config(config.embedder)
         self.chunker = ChunkerFactory.from_config(config.chunker)
+        self.memory_max_length = 8000

     def _make_memory_item(
         self,
@@ -170,7 +171,6 @@ def _process_chat_data(self, scene_data_info, info, **kwargs):
         mode = kwargs.get("mode", "fine")
         if mode == "fast":
-            # Use merge logic to handle short messages
             raw_content_list = []
             current_content = ""
             current_roles = set()
@@ -185,8 +185,7 @@ def _process_chat_data(self, scene_data_info, info, **kwargs):

                     prefix = f"{role}: " + (f"[{chat_time}]: " if chat_time else "")
                     mem = f"{prefix}{content}\n"
-
-                    if len(mem) > 2000:
+                    if len(mem) > self.memory_max_length:
                         if current_content:
                             raw_content_list.append(
                                 {
@@ -196,9 +195,7 @@ def _process_chat_data(self, scene_data_info, info, **kwargs):
                                     "start_idx": current_idx,
                                 }
                             )
-                            current_content = ""
-                            current_roles = set()
-                            current_sources = []
+                            current_content, current_roles, current_sources = "", set(), []

                         try:
                             chunks = self.chunker.chunk(content) or []
@@ -209,8 +206,8 @@ def _process_chat_data(self, scene_data_info, info, **kwargs):
                         if not chunks:
                             chunks = [type("C", (), {"text": content})]

-                        for chunk in chunks:
-                            chunk_text = f"{prefix}{chunk.text}"
+                        for c in chunks:
+                            chunk_text = c.text if hasattr(c, "text") else c
                             raw_content_list.append(
                                 {
                                     "text": chunk_text,
@@ -227,7
+224,7 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): } ) else: - if len(current_content + mem) > 2000: + if len(current_content + mem) > self.memory_max_length: if current_content: raw_content_list.append( { @@ -240,24 +237,14 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): current_content = mem current_roles = {role} current_sources = [ - { - "type": "chat", - "index": idx, - "role": role, - "chat_time": chat_time, - } + {"type": "chat", "index": idx, "role": role, "chat_time": chat_time} ] current_idx = idx else: current_content += mem current_roles.add(role) current_sources.append( - { - "type": "chat", - "index": idx, - "role": role, - "chat_time": chat_time, - } + {"type": "chat", "index": idx, "role": role, "chat_time": chat_time} ) except Exception as e: From 37fcff81b475be581f18abfb44a6055bf71c78ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Thu, 16 Oct 2025 16:51:33 +0800 Subject: [PATCH 09/32] feat: add token-based window splitting and concurrency improvements --- src/memos/mem_reader/simple_struct.py | 227 +++++++++++++++++++------- 1 file changed, 172 insertions(+), 55 deletions(-) diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index fc0e81e52..0bd2932a0 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -3,6 +3,7 @@ import json import os import re +import traceback from abc import ABC from typing import Any @@ -41,6 +42,26 @@ "doc": {"en": SIMPLE_STRUCT_DOC_READER_PROMPT, "zh": SIMPLE_STRUCT_DOC_READER_PROMPT_ZH}, } +try: + import tiktoken + + try: + _ENC = tiktoken.encoding_for_model("gpt-4o-mini") + except Exception: + _ENC = tiktoken.get_encoding("cl100k_base") + + def _count_tokens_text(s: str) -> int: + return len(_ENC.encode(s or "")) +except Exception: + # Heuristic fallback: zh chars ~1 token, others ~1 token per ~4 chars + def _count_tokens_text(s: str) -> int: + if not s: + return 0 + zh_chars = re.findall(r"[\u4e00-\u9fff]", s) + zh = len(zh_chars) + rest = len(s) - zh + return zh + max(1, rest // 4) + def detect_lang(text): try: @@ -135,6 +156,9 @@ def __init__(self, config: SimpleStructMemReaderConfig): self.embedder = EmbedderFactory.from_config(config.embedder) self.chunker = ChunkerFactory.from_config(config.chunker) self.memory_max_length = 8000 + # Use token-based windowing; default to ~5000 tokens if not configured + self.chat_window_max_tokens = getattr(self.config, "chat_window_max_tokens", 5000) + self._count_tokens = _count_tokens_text def _make_memory_item( self, @@ -167,10 +191,37 @@ def _make_memory_item( ), ) + def _get_llm_response(self, mem_str: str) -> dict: + lang = detect_lang(mem_str) + template = PROMPT_DICT["chat"][lang] + examples = PROMPT_DICT["chat"][f"{lang}_example"] + prompt = template.replace("${conversation}", mem_str) + if self.config.remove_prompt_example: + prompt = prompt.replace(examples, "") + messages = [{"role": "user", "content": prompt}] + try: + response_text = self.llm.generate(messages) + response_json = self.parse_json_result(response_text) + except Exception as e: + logger.error(f"[LLM] Exception during chat generation: {e}") + response_json = { + "memory list": [ + { + "key": mem_str[:10], + "memory_type": "UserMemory", + "value": mem_str, + "tags": [], + } + ], + "summary": mem_str, + } + return response_json + @timed def _process_chat_data(self, scene_data_info, info, **kwargs): mode = kwargs.get("mode", "fine") if mode == "fast": + logger.debug("Using Fast 
Mode") raw_content_list = [] current_content = "" current_roles = set() @@ -179,13 +230,19 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): for idx, item in enumerate(scene_data_info): try: - role = item.get("role", "user") + role = item.get("role", "") content = item.get("content", "") chat_time = item.get("chat_time", None) - prefix = f"{role}: " + (f"[{chat_time}]: " if chat_time else "") + prefix = ( + f"{role}: " + if (role and role != "mix") + else f"[{chat_time}]: " + if chat_time + else "" + ) mem = f"{prefix}{content}\n" - if len(mem) > self.memory_max_length: + if self._count_tokens(mem) > self.chat_window_max_tokens: if current_content: raw_content_list.append( { @@ -207,7 +264,8 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): chunks = [type("C", (), {"text": content})] for c in chunks: - chunk_text = c.text if hasattr(c, "text") else c + chunk_body = c.text if hasattr(c, "text") else c + chunk_text = f"{prefix}{chunk_body}" raw_content_list.append( { "text": chunk_text, @@ -224,7 +282,7 @@ def _process_chat_data(self, scene_data_info, info, **kwargs): } ) else: - if len(current_content + mem) > self.memory_max_length: + if self._count_tokens(current_content + mem) > self.chat_window_max_tokens: if current_content: raw_content_list.append( { @@ -290,53 +348,73 @@ def _process_single_item(item_data): return None with ContextThreadPoolExecutor(max_workers=8) as executor: - futures = [executor.submit(_process_single_item, item) for item in raw_content_list] - - for future in concurrent.futures.as_completed(futures): + futures = { + executor.submit(_process_single_item, item): i + for i, item in enumerate(raw_content_list) + } + + chat_nodes = [None] * len(futures) + for fut in concurrent.futures.as_completed(futures): + i = futures[fut] try: - node = future.result() + node = fut.result() if node: - chat_nodes.append(node) + chat_nodes[i] = node except Exception as e: logger.error(f"[ChatFast] Future result error: {e}") + chat_nodes = [n for n in chat_nodes if n is not None] return chat_nodes + else: + logger.debug("Using Fine Mode") + mem_list = [] + for item in scene_data_info: + role = item.get("role", "") + content = item.get("content", "") + chat_time = item.get("chat_time", "") + prefix = ( + f"{role}: " + if (role and role != "mix") + else f"[{chat_time}]: " + if chat_time + else "" + ) + mem_list.append(f"{prefix}{content}\n") + response_json = self._get_llm_response("\n".join(mem_list)) + chat_read_nodes = [] + for memory_i_raw in response_json.get("memory list", []): + try: + memory_type = ( + memory_i_raw.get("memory_type", "LongTermMemory") + .replace("长期记忆", "LongTermMemory") + .replace("用户记忆", "UserMemory") + ) - mem_list = [] - for item in scene_data_info: - if "chat_time" in item: - mem = item["role"] + ": " + f"[{item['chat_time']}]: " + item["content"] - mem_list.append(mem) - else: - mem = item["role"] + ":" + item["content"] - mem_list.append(mem) - lang = detect_lang("\n".join(mem_list)) - template = PROMPT_DICT["chat"][lang] - examples = PROMPT_DICT["chat"][f"{lang}_example"] - - prompt = template.replace("${conversation}", "\n".join(mem_list)) - if self.config.remove_prompt_example: - prompt = prompt.replace(examples, "") + if memory_type not in ["LongTermMemory", "UserMemory"]: + memory_type = "LongTermMemory" - messages = [{"role": "user", "content": prompt}] + node_i = self._make_memory_item( + value=memory_i_raw.get("value", ""), + info=info, + memory_type=memory_type, + tags=memory_i_raw.get("tags", []) + if 
isinstance(memory_i_raw.get("tags", []), list) + else [], + key=memory_i_raw.get("key", ""), + sources=scene_data_info, + background=response_json.get("summary", ""), + type_="fact", + confidence=0.99, + ) + chat_read_nodes.append(node_i) + except Exception as e: + logger.error(f"[ChatReader] Error parsing memory item: {e}") - try: - response_text = self.llm.generate(messages) - response_json = self.parse_json_result(response_text) - except Exception as e: - logger.error(f"[LLM] Exception during chat generation: {e}") - response_json = { - "memory list": [ - { - "key": "\n".join(mem_list)[:10], - "memory_type": "UserMemory", - "value": "\n".join(mem_list), - "tags": [], - } - ], - "summary": "\n".join(mem_list), - } + return chat_read_nodes + def _process_transfer_chat_data(self, raw_node: TextualMemoryItem): + raw_memory = raw_node.memory + response_json = self._get_llm_response(raw_memory) chat_read_nodes = [] for memory_i_raw in response_json.get("memory list", []): try: @@ -345,19 +423,20 @@ def _process_single_item(item_data): .replace("长期记忆", "LongTermMemory") .replace("用户记忆", "UserMemory") ) - if memory_type not in ["LongTermMemory", "UserMemory"]: memory_type = "LongTermMemory" - node_i = self._make_memory_item( value=memory_i_raw.get("value", ""), - info=info, + info={ + "user_id": raw_node.metadata.user_id, + "session_id": raw_node.metadata.session_id, + }, memory_type=memory_type, tags=memory_i_raw.get("tags", []) if isinstance(memory_i_raw.get("tags", []), list) else [], key=memory_i_raw.get("key", ""), - sources=scene_data_info, + sources=raw_node.metadata.sources, background=response_json.get("summary", ""), type_="fact", confidence=0.99, @@ -426,9 +505,44 @@ def get_memory( for scene_data_info in list_scene_data_info ] for future in concurrent.futures.as_completed(futures): - res_memory = future.result() - memory_list.append(res_memory) + try: + res_memory = future.result() + if res_memory is not None: + memory_list.append(res_memory) + except Exception as e: + logger.error(f"Task failed with exception: {e}") + logger.error(traceback.format_exc()) + return memory_list + + def fine_transfer_simple_mem( + self, input_memories: list[list[TextualMemoryItem]], type: str + ) -> list[list[TextualMemoryItem]]: + if not input_memories: + return [] + + memory_list = [] + if type == "chat": + processing_func = self._process_transfer_chat_data + elif type == "doc": + processing_func = self._process_transfer_doc_data + else: + processing_func = self._process_transfer_doc_data + + # Process Q&A pairs concurrently with context propagation + with ContextThreadPoolExecutor() as executor: + futures = [ + executor.submit(processing_func, scene_data_info) + for scene_data_info in input_memories + ] + for future in concurrent.futures.as_completed(futures): + try: + res_memory = future.result() + if res_memory is not None: + memory_list.append(res_memory) + except Exception as e: + logger.error(f"Task failed with exception: {e}") + logger.error(traceback.format_exc()) return memory_list def get_scene_data_info(self, scene_data: list, type: str) -> list[str]: @@ -444,13 +558,6 @@ def get_scene_data_info(self, scene_data: list, type: str) -> list[str]: List of strings containing the processed scene data """ results = [] - parser_config = ParserConfigFactory.model_validate( - { - "backend": "markitdown", - "config": {}, - } - ) - parser = ParserFactory.from_config(parser_config) if type == "chat": for items in scene_data: @@ -468,6 +575,13 @@ def get_scene_data_info(self, scene_data: list, type: 
str) -> list[str]:
                     if result:
                         results.append(result)
         elif type == "doc":
+            parser_config = ParserConfigFactory.model_validate(
+                {
+                    "backend": "markitdown",
+                    "config": {},
+                }
+            )
+            parser = ParserFactory.from_config(parser_config)
             for item in scene_data:
                 try:
                     if os.path.exists(item):
@@ -529,6 +643,9 @@ def _process_doc_data(self, scene_data_info, info, **kwargs):
                     logger.error(f"[DocReader] Future task failed: {e}")
         return doc_nodes

+    def _process_transfer_doc_data(self, raw_node: TextualMemoryItem):
+        raise NotImplementedError
+
     def parse_json_result(self, response_text):
         try:
             json_start = response_text.find("{")

From 5f7e8e0528c22c2467b1da90043526cf074744e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Thu, 16 Oct 2025 17:11:41 +0800
Subject: [PATCH 10/32] feat: split chunking logic by mode in simple struct
 mem reader

---
 src/memos/mem_reader/simple_struct.py | 254 ++++++++------------------
 1 file changed, 76 insertions(+), 178 deletions(-)

diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py
index 0bd2932a0..1197ef04d 100644
--- a/src/memos/mem_reader/simple_struct.py
+++ b/src/memos/mem_reader/simple_struct.py
@@ -217,199 +217,97 @@ def _get_llm_response(self, mem_str: str) -> dict:
             }
         return response_json

+    def _iter_chat_windows(self, scene_data_info, max_tokens=None, overlap=200):
+        """
+        Yield sliding windows over the chat history, sized by the token counter.
+        """
+        max_tokens = max_tokens or self.chat_window_max_tokens
+        buf, sources, start_idx = [], [], 0
cur_text = "" + + for idx, item in enumerate(scene_data_info): + role = item.get("role", "") + content = item.get("content", "") + chat_time = item.get("chat_time", None) + prefix = ( + f"{role}: " + if (role and role != "mix") + else (f"[{chat_time}]: " if chat_time else "") + ) + line = f"{prefix}{content}\n" - except Exception as e: - logger.error(f"[ChatFast] Error preparing item {idx}: {e}") + if self._count_tokens(cur_text + line) > max_tokens and cur_text: + text = "".join(buf) + yield {"text": text, "sources": sources.copy(), "start_idx": start_idx} + while buf and self._count_tokens("".join(buf)) > overlap: + buf.pop(0) + sources.pop(0) + start_idx = idx + cur_text = "".join(buf) - if current_content: - raw_content_list.append( - { - "text": current_content, - "roles": current_roles, - "sources": current_sources, - "start_idx": current_idx, - } - ) + buf.append(line) + sources.append({"type": "chat", "index": idx, "role": role, "chat_time": chat_time}) + cur_text = "".join(buf) - chat_nodes = [] + if buf: + yield {"text": "".join(buf), "sources": sources.copy(), "start_idx": start_idx} - def _process_single_item(item_data): - try: - text = item_data["text"] - roles = item_data["roles"] - sources = item_data["sources"] - - mem_type = "UserMemory" if (roles and roles == {"user"}) else "LongTermMemory" - tags = ["mode:fast", f"lang:{detect_lang(text)}"] + [ - f"role:{r}" for r in sorted(roles) - ] - - node = self._make_memory_item( - value=text, - info=info, - memory_type=mem_type, - tags=tags, - key=None, - sources=sources, - background="", - type_="fact", - confidence=0.99, - ) - return node - except Exception as e: - logger.error(f"[ChatFast] Error processing item: {e}") - return None + @timed + def _process_chat_data(self, scene_data_info, info, **kwargs): + mode = kwargs.get("mode", "fine") + windows = list(self._iter_chat_windows(scene_data_info)) - with ContextThreadPoolExecutor(max_workers=8) as executor: - futures = { - executor.submit(_process_single_item, item): i - for i, item in enumerate(raw_content_list) - } + if mode == "fast": + logger.debug("Using unified Fast Mode") + + def _build_fast_node(w): + text = w["text"] + roles = {s.get("role", "") for s in w["sources"] if s.get("role")} + mem_type = "UserMemory" if roles == {"user"} else "LongTermMemory" + tags = ["mode:fast", f"lang:{detect_lang(text)}"] + [ + f"role:{r}" for r in sorted(roles) + ] + return self._make_memory_item( + value=text, info=info, memory_type=mem_type, tags=tags, sources=w["sources"] + ) - chat_nodes = [None] * len(futures) + with ContextThreadPoolExecutor(max_workers=8) as ex: + futures = {ex.submit(_build_fast_node, w): i for i, w in enumerate(windows)} + results = [None] * len(futures) for fut in concurrent.futures.as_completed(futures): i = futures[fut] try: node = fut.result() if node: - chat_nodes[i] = node + results[i] = node except Exception as e: - logger.error(f"[ChatFast] Future result error: {e}") - - chat_nodes = [n for n in chat_nodes if n is not None] + logger.error(f"[ChatFast] error: {e}") + chat_nodes = [r for r in results if r] return chat_nodes else: - logger.debug("Using Fine Mode") - mem_list = [] - for item in scene_data_info: - role = item.get("role", "") - content = item.get("content", "") - chat_time = item.get("chat_time", "") - prefix = ( - f"{role}: " - if (role and role != "mix") - else f"[{chat_time}]: " - if chat_time - else "" - ) - mem_list.append(f"{prefix}{content}\n") - response_json = self._get_llm_response("\n".join(mem_list)) + logger.debug("Using unified 
Fine Mode") chat_read_nodes = [] - for memory_i_raw in response_json.get("memory list", []): - try: - memory_type = ( - memory_i_raw.get("memory_type", "LongTermMemory") - .replace("长期记忆", "LongTermMemory") - .replace("用户记忆", "UserMemory") - ) - - if memory_type not in ["LongTermMemory", "UserMemory"]: - memory_type = "LongTermMemory" - - node_i = self._make_memory_item( - value=memory_i_raw.get("value", ""), - info=info, - memory_type=memory_type, - tags=memory_i_raw.get("tags", []) - if isinstance(memory_i_raw.get("tags", []), list) - else [], - key=memory_i_raw.get("key", ""), - sources=scene_data_info, - background=response_json.get("summary", ""), - type_="fact", - confidence=0.99, - ) - chat_read_nodes.append(node_i) - except Exception as e: - logger.error(f"[ChatReader] Error parsing memory item: {e}") - + for w in windows: + resp = self._get_llm_response(w["text"]) + for m in resp.get("memory list", []): + try: + memory_type = ( + m.get("memory_type", "LongTermMemory") + .replace("长期记忆", "LongTermMemory") + .replace("用户记忆", "UserMemory") + ) + node = self._make_memory_item( + value=m.get("value", ""), + info=info, + memory_type=memory_type, + tags=m.get("tags", []), + key=m.get("key", ""), + sources=w["sources"], + background=resp.get("summary", ""), + ) + chat_read_nodes.append(node) + except Exception as e: + logger.error(f"[ChatFine] parse error: {e}") return chat_read_nodes def _process_transfer_chat_data(self, raw_node: TextualMemoryItem): From 23555275ef22f450d45bf31385661ec88fe7ab74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Thu, 16 Oct 2025 20:36:36 +0800 Subject: [PATCH 11/32] feat: update async-mode add --- examples/mem_reader/reader.py | 317 ++++++++++++++++++ src/memos/graph_dbs/neo4j.py | 2 +- src/memos/mem_os/core.py | 23 +- src/memos/mem_reader/base.py | 7 + src/memos/mem_reader/simple_struct.py | 2 +- src/memos/mem_scheduler/base_scheduler.py | 68 +++- src/memos/mem_scheduler/general_scheduler.py | 27 +- src/memos/memories/textual/tree.py | 9 +- .../tree_text_memory/organize/manager.py | 3 +- .../tree_text_memory/retrieve/recall.py | 2 + .../tree_text_memory/retrieve/searcher.py | 2 +- 11 files changed, 426 insertions(+), 36 deletions(-) diff --git a/examples/mem_reader/reader.py b/examples/mem_reader/reader.py index 30e42d497..3da5d5e76 100644 --- a/examples/mem_reader/reader.py +++ b/examples/mem_reader/reader.py @@ -2,6 +2,11 @@ from memos.configs.mem_reader import SimpleStructMemReaderConfig from memos.mem_reader.simple_struct import SimpleStructMemReader +from memos.memories.textual.item import ( + SourceMessage, + TextualMemoryItem, + TreeNodeTextualMemoryMetadata, +) def main(): @@ -227,6 +232,318 @@ def main(): for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list print(f" [{i}][{j}] {mem_item.memory[:100]}...") + # 7. Example of transfer fast mode result into fine result + fast_mode_memories = [ + TextualMemoryItem( + id="4553141b-3a33-4548-b779-e677ec797a9f", + memory="user: Nate:Oh cool! I might check that one out some time soon! I do love watching classics.\nassistant: Joanna:Yep, that movie is awesome. I first watched it around 3 years ago. I even went out and got a physical copy!\nuser: Nate:Sounds cool! Have you seen it a lot? sounds like you know the movie well!\nassistant: Joanna:A few times. It's one of my favorites! I really like the idea and the acting.\nuser: Nate:Cool! I'll definitely check it out. Thanks for the recommendation!\nassistant: Joanna:No problem, Nate! 
Let me know if you like it!\n", + metadata=TreeNodeTextualMemoryMetadata( + user_id="nate_test", + session_id="root_session", + status="activated", + type="fact", + key="user: Nate:Oh cool", + confidence=0.9900000095367432, + source=None, + tags=["mode:fast", "lang:en", "role:assistant", "role:user"], + visibility=None, + updated_at="2025-10-16T17:16:30.094877+08:00", + memory_type="LongTermMemory", + sources=[ + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=0, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=1, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=2, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=3, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=4, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=5, + ), + ], + embedding=None, + created_at="2025-10-16T17:16:30.094919+08:00", + usage=[], + background="", + ), + ), + TextualMemoryItem( + id="752e42fa-92b6-491a-a430-6864a7730fba", + memory="user: Nate:It was! How about you? Do you have any hobbies you love?\nassistant: Joanna:Yeah! Besides writing, I also enjoy reading, watching movies, and exploring nature. Anything else you enjoy doing, Nate?\nuser: Nate:Playing video games and watching movies are my main hobbies.\nassistant: Joanna:Cool, Nate! So we both have similar interests. What type of movies do you like best?\nuser: Nate:I love action and sci-fi movies, the effects are so cool! What about you, what's your favorite genre?\nassistant: Joanna:I'm all about dramas and romcoms. I love getting immersed in the feelings and plots.\nuser: Nate:Wow, movies can be so powerful! Do you have any recommendations for me?\nassistant: Joanna:Yeah, totally! Have you seen this romantic drama that's all about memory and relationships? It's such a good one.\nuser: Nate:Oh cool! I might check that one out some time soon! I do love watching classics.\nassistant: Joanna:Yep, that movie is awesome. I first watched it around 3 years ago. 
I even went out and got a physical copy!\n", + metadata=TreeNodeTextualMemoryMetadata( + user_id="nate_test", + session_id="root_session", + status="activated", + type="fact", + key="user: Nate:It was", + confidence=0.9900000095367432, + source=None, + tags=["mode:fast", "lang:en", "role:assistant", "role:user"], + visibility=None, + updated_at="2025-10-16T17:16:30.095726+08:00", + memory_type="LongTermMemory", + sources=[ + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=0, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=1, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=2, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=3, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=4, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=5, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=6, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=7, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=8, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=9, + ), + ], + embedding=None, + created_at="2025-10-16T17:16:30.095767+08:00", + usage=[], + background="", + ), + ), + TextualMemoryItem( + id="c9cf448c-deee-43a8-bafd-eb15fde535b2", + memory="user: Nate:Hey Joanna! Long time no see! What's up? Anything fun going on?\nassistant: Joanna:Hey Nate! Long time no see! I've been working on a project lately - it's been pretty cool. What about you - any fun projects or hobbies?\nuser: Nate:Hey Joanna! That's cool! I won my first video game tournament last week - so exciting!\nassistant: Joanna:Wow Nate! Congrats on winning! Tell me more - what game was it?\nuser: Nate:Thanks! it's a team shooter game.\nassistant: Joanna:Wow, great job! What was is called?\nuser: Nate:The game was called Counter-Strike: Global Offensive, and me and my team had a blast to the very end!\nassistant: Joanna:Cool, Nate! Sounds like a fun experience, even if I'm not into games.\nuser: Nate:It was! How about you? Do you have any hobbies you love?\nassistant: Joanna:Yeah! Besides writing, I also enjoy reading, watching movies, and exploring nature. 
Anything else you enjoy doing, Nate?\n", + metadata=TreeNodeTextualMemoryMetadata( + user_id="nate_test", + session_id="root_session", + status="activated", + type="fact", + key="user: Nate:Hey Joanna", + confidence=0.9900000095367432, + source=None, + tags=["mode:fast", "lang:en", "role:assistant", "role:user"], + visibility=None, + updated_at="2025-10-16T17:16:30.098208+08:00", + memory_type="LongTermMemory", + sources=[ + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=0, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=1, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=2, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=3, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=4, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=5, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=6, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=7, + ), + SourceMessage( + type="chat", + role="user", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=8, + ), + SourceMessage( + type="chat", + role="assistant", + chat_time="7:31 pm on 21 January, 2022", + message_id=None, + content=None, + doc_path=None, + index=9, + ), + ], + embedding=None, + created_at="2025-10-16T17:16:30.098246+08:00", + usage=[], + background="", + ), + ), + ] + fine_memories = reader.fine_transfer_simple_mem(fast_mode_memories, type="chat") + print("\n--- Transfer Mode Results (first 3 items) ---") + for i, mem_list in enumerate(fine_memories[:3]): + for j, mem_item in enumerate(mem_list[:2]): # Show first 2 items from each list + print(f" [{i}][{j}] {mem_item.memory[:100]}...") + # 7. Example of processing documents (only in fine mode) print("\n=== Processing Documents (Fine Mode Only) ===") # Example document paths (you should replace these with actual document paths) diff --git a/src/memos/graph_dbs/neo4j.py b/src/memos/graph_dbs/neo4j.py index 96908913d..b109fb001 100644 --- a/src/memos/graph_dbs/neo4j.py +++ b/src/memos/graph_dbs/neo4j.py @@ -623,7 +623,7 @@ def search_by_embedding( vector (list[float]): The embedding vector representing query semantics. top_k (int): Number of top similar nodes to retrieve. scope (str, optional): Memory type filter (e.g., 'WorkingMemory', 'LongTermMemory'). - status (str, optional): Node status filter (e.g., 'active', 'archived'). + status (str, optional): Node status filter (e.g., 'activated', 'archived'). If provided, restricts results to nodes with matching status. threshold (float, optional): Minimum similarity score threshold (0 ~ 1). search_filter (dict, optional): Additional metadata filters for search results. 
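
Note on the flow this patch wires up: in async mode, `add` first persists fast-mode memories, then enqueues a `mem_read` message so the scheduler can re-read them in fine mode later. A stand-alone sketch of that handoff using only the stdlib (the plain dict stands in for `ScheduleMessageItem`; its field names are assumptions, not the real schema):

    import json
    import queue
    from datetime import datetime

    MEM_READ_LABEL = "mem_read"  # mirrors schemas/general_schemas.py

    scheduler_queue: queue.Queue = queue.Queue()

    def submit_mem_read(mem_ids: list[str], user_id: str, mem_cube_id: str) -> None:
        # fast-mode extraction already persisted mem_ids; pass the IDs on
        # as a JSON string for later fine-mode processing
        scheduler_queue.put(
            {
                "user_id": user_id,
                "mem_cube_id": mem_cube_id,
                "label": MEM_READ_LABEL,
                "content": json.dumps(mem_ids),
                "timestamp": datetime.utcnow(),
            }
        )

    submit_mem_read(["mem-1", "mem-2"], "nate_test", "cube-0")
    print(scheduler_queue.get()["content"])  # ["mem-1", "mem-2"]
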
diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index b536ec5b2..0487540fb 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -688,6 +688,12 @@ def add( logger.info( f"time add: get mem_cube_id check in mem_cubes time user_id: {target_user_id} time is: {time.time() - time_start_0}" ) + sync_mode = self.mem_cubes[mem_cube_id].text_mem.mode + if sync_mode == "async": + assert self.mem_scheduler is not None, ( + "Mem-Scheduler must be working when use synchronous memory adding." + ) + logger.debug(f"Mem-reader mode is: {sync_mode}") time_start_1 = time.time() if ( (messages is not None) @@ -697,11 +703,6 @@ def add( logger.info( f"time add: messages is not None and enable_textual_memory and text_mem is not None time user_id: {target_user_id} time is: {time.time() - time_start_1}" ) - sync_mode = self.mem_cubes[mem_cube_id].text_mem.mode - if sync_mode == "async": - assert self.mem_scheduler is not None, ( - "Mem-Scheduler must be working when use synchronous memory adding." - ) if self.mem_cubes[mem_cube_id].config.text_mem.backend != "tree_text": add_memory = [] @@ -774,10 +775,12 @@ def add( messages_list = [ [{"role": "user", "content": memory_content}] ] # for only user-str input and convert message + memories = self.mem_reader.get_memory( messages_list, type="chat", info={"user_id": target_user_id, "session_id": target_session_id}, + mode="fast" if sync_mode == "async" else "fine", ) mem_ids = [] @@ -791,6 +794,16 @@ def add( # submit messages for scheduler if self.enable_mem_scheduler and self.mem_scheduler is not None: mem_cube = self.mem_cubes[mem_cube_id] + if sync_mode == "async": + message_item = ScheduleMessageItem( + user_id=target_user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + label=MEM_READ_LABEL, + content=json.dumps(mem_ids), + timestamp=datetime.utcnow(), + ) + self.mem_scheduler.submit_messages(messages=[message_item]) message_item = ScheduleMessageItem( user_id=target_user_id, mem_cube_id=mem_cube_id, diff --git a/src/memos/mem_reader/base.py b/src/memos/mem_reader/base.py index ba8be8652..3095a0bc6 100644 --- a/src/memos/mem_reader/base.py +++ b/src/memos/mem_reader/base.py @@ -25,3 +25,10 @@ def get_memory( @abstractmethod def transform_memreader(self, data: dict) -> list[TextualMemoryItem]: """Transform the memory data into a list of TextualMemoryItem objects.""" + + @abstractmethod + def fine_transfer_simple_mem( + self, input_memories: list[list[TextualMemoryItem]], type: str + ) -> list[list[TextualMemoryItem]]: + """Fine Transform TextualMemoryItem List into another list of + TextualMemoryItem objects via calling llm to better understand users.""" diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index 1197ef04d..caf4c19df 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -413,7 +413,7 @@ def get_memory( return memory_list def fine_transfer_simple_mem( - self, input_memories: list[list[TextualMemoryItem]], type: str + self, input_memories: list[TextualMemoryItem], type: str ) -> list[list[TextualMemoryItem]]: if not input_memories: return [] diff --git a/src/memos/mem_scheduler/base_scheduler.py b/src/memos/mem_scheduler/base_scheduler.py index 0a8c43d78..550aee1f6 100644 --- a/src/memos/mem_scheduler/base_scheduler.py +++ b/src/memos/mem_scheduler/base_scheduler.py @@ -125,12 +125,17 @@ def initialize_modules( self.dispatcher_monitor.start() # initialize with auth_config - if self.auth_config_path is not None and 
Path(self.auth_config_path).exists(): - self.auth_config = AuthConfig.from_local_config(config_path=self.auth_config_path) - elif AuthConfig.default_config_exists(): - self.auth_config = AuthConfig.from_local_config() - else: - self.auth_config = AuthConfig.from_local_env() + try: + if self.auth_config_path is not None and Path(self.auth_config_path).exists(): + self.auth_config = AuthConfig.from_local_config( + config_path=self.auth_config_path + ) + elif AuthConfig.default_config_exists(): + self.auth_config = AuthConfig.from_local_config() + else: + self.auth_config = AuthConfig.from_local_env() + except Exception: + pass if self.auth_config is not None: self.rabbitmq_config = self.auth_config.rabbitmq @@ -637,3 +642,54 @@ def _cleanup_queues(self) -> None: self._web_log_message_queue.get_nowait() except queue.Empty: pass + + def mem_scheduler_wait(self, timeout: float = 180.0, poll: float = 0.1) -> bool: + """ + Block until the scheduler has finished processing all submitted messages. + + Strategy: + 1) Wait for the internal memos_message_queue to drain + - Prefer "unfinished_tasks" if available; otherwise fallback to empty() polling. + 2) If parallel dispatch is enabled, wait for all dispatched futures to complete via dispatcher.join(). + + Args: + timeout: Maximum seconds to wait in total. + poll: Polling interval when falling back to checks. + Returns: + True if drained before timeout, otherwise False. + """ + deadline = time.time() + timeout + + # 1) Wait for internal queue to drain + while True: + try: + unfinished = getattr(self.memos_message_queue, "unfinished_tasks", None) + if unfinished is not None: + if int(unfinished) == 0: + break + else: + if self.memos_message_queue.empty(): + break + except Exception: + # Be conservative: if any issue reading metrics, fallback to empty() + if self.memos_message_queue.empty(): + break + + if time.time() >= deadline: + logger.warning("mem_scheduler_wait: queue did not drain before timeout") + return False + time.sleep(poll) + + # 2) Wait for dispatcher futures (if running in parallel mode) + remaining = max(0.0, deadline - time.time()) + if self.enable_parallel_dispatch and self.dispatcher is not None: + try: + ok = self.dispatcher.join(timeout=remaining if remaining > 0 else 0) + except TypeError: + # Some implementations may not accept timeout + ok = self.dispatcher.join() + if not ok: + logger.warning("mem_scheduler_wait: dispatcher did not complete before timeout") + return False + + return True diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 9e64e7311..13e92715d 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -1,4 +1,5 @@ import json +import traceback from memos.configs.mem_scheduler import GeneralSchedulerConfig from memos.log import get_logger @@ -297,27 +298,13 @@ def _process_memories_with_reader( logger.warning("No valid memory items found for processing") return - # Prepare scene data for mem_reader - scene_data = [] - for memory_item in memory_items: - scene_data.append( - { - "role": "user", # or determine from metadata - "content": memory_item.memory, - "chat_time": memory_item.metadata.updated_at - or memory_item.metadata.created_at, - } - ) - # Use mem_reader to process the memories - logger.info(f"Processing {len(scene_data)} memories with mem_reader") + logger.info(f"Processing {len(memory_items)} memories with mem_reader") # Extract memories using mem_reader - processed_memories = 
self.mem_reader.get_memory( - scene_data=scene_data, + processed_memories = self.mem_reader.fine_transfer_simple_mem( + memory_items, type="chat", - info={"user_id": user_id, "session_id": "", "mem_cube_id": mem_cube_id}, - mode="fine", # Use fast mode for async processing ) if processed_memories and len(processed_memories) > 0: @@ -349,8 +336,10 @@ def _process_memories_with_reader( text_mem.memory_manager.remove_and_refresh_memory() logger.info("Remove and Refresh Memories") - except Exception as e: - logger.error(f"Error in _process_memories_with_reader: {e}", exc_info=True) + except Exception: + logger.error( + f"Error in _process_memories_with_reader: {traceback.format_exc()}", exc_info=True + ) def _trigger_memory_reorganization( self, diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index 311bdd87a..ea087eac9 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -274,7 +274,14 @@ def get_all(self) -> dict: return all_items def delete(self, memory_ids: list[str]) -> None: - raise NotImplementedError + """Hard delete: permanently remove nodes and their edges from the graph.""" + if not memory_ids: + return + for mid in memory_ids: + try: + self.graph_store.delete_node(mid) + except Exception as e: + logger.warning(f"TreeTextMemory.delete_hard: failed to delete {mid}: {e}") def delete_all(self) -> None: """Delete all memories and their relationships from the graph store.""" diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index 1dd4aee5e..e8dc5c250 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -112,8 +112,7 @@ def _process_memory(self, memory: TextualMemoryItem): ids = [] # Add to WorkingMemory - working_id = self._add_memory_to_db(memory, "WorkingMemory") - ids.append(working_id) + self._add_memory_to_db(memory, "WorkingMemory") # Add to LongTermMemory and UserMemory if memory.metadata.memory_type in ["LongTermMemory", "UserMemory"]: diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/recall.py b/src/memos/memories/textual/tree_text_memory/retrieve/recall.py index 84cc8ecb3..c6de85b0a 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/recall.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/recall.py @@ -192,6 +192,7 @@ def _vector_recall( memory_scope: str, top_k: int = 20, max_num: int = 3, + status: str = "activated", cube_name: str | None = None, search_filter: dict | None = None, ) -> list[TextualMemoryItem]: @@ -207,6 +208,7 @@ def search_single(vec, filt=None): self.graph_store.search_by_embedding( vector=vec, top_k=top_k, + status=status, scope=memory_scope, cube_name=cube_name, search_filter=filt, diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py index df154f23a..78bd73007 100644 --- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py +++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py @@ -118,7 +118,7 @@ def _parse_task(self, query, info, mode, top_k=5, search_filter: dict | None = N related_nodes = [ self.graph_store.get_node(n["id"]) for n in self.graph_store.search_by_embedding( - query_embedding, top_k=top_k, search_filter=search_filter + query_embedding, top_k=top_k, status="activated", search_filter=search_filter ) ] memories = [] From 
2ee4c4cfe0205e5abea20eb26a570eb0aada85d0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Thu, 16 Oct 2025 21:06:19 +0800
Subject: [PATCH 12/32] chore: update gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index ae7bdc4d6..8319a4d2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@ evaluation/.env
 !evaluation/configs-example/*.json
 evaluation/configs/*
 **tree_textual_memory_locomo**
+**script.py**
 .env
 evaluation/scripts/personamem

From 593faa503c97f25460a16ee42e5fca762ced3258 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Fri, 17 Oct 2025 14:42:06 +0800
Subject: [PATCH 13/32] feat: improve database node write performance

---
 src/memos/mem_os/core.py                     | 13 +++----
 .../tree_text_memory/organize/manager.py     | 34 ++++++++++++-------
 2 files changed, 27 insertions(+), 20 deletions(-)

diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py
index 0487540fb..efb2d0622 100644
--- a/src/memos/mem_os/core.py
+++ b/src/memos/mem_os/core.py
@@ -726,14 +726,11 @@ def add(
                     logger.info(
                         f"time add: get mem_reader time user_id: {target_user_id} time is: {time.time() - time_start_2}"
                     )
-                    mem_ids = []
-                    for mem in memories:
-                        mem_id_list: list[str] = self.mem_cubes[mem_cube_id].text_mem.add(mem)
-                        mem_ids.extend(mem_id_list)
-                        logger.info(
-                            f"Added memory user {target_user_id} to memcube {mem_cube_id}: {mem_id_list}"
-                        )
-
+                    memories_flatten = [m for m_list in memories for m in m_list]
+                    mem_ids: list[str] = self.mem_cubes[mem_cube_id].text_mem.add(memories_flatten)
+                    logger.info(
+                        f"Added memory user {target_user_id} to memcube {mem_cube_id}: {mem_ids}"
+                    )
                     # submit messages for scheduler
                     if self.enable_mem_scheduler and self.mem_scheduler is not None:
                         mem_cube = self.mem_cubes[mem_cube_id]
diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py
index e8dc5c250..0abefa19e 100644
--- a/src/memos/memories/textual/tree_text_memory/organize/manager.py
+++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py
@@ -58,7 +58,7 @@ def add(self, memories: list[TextualMemoryItem]) -> list[str]:
         """
         added_ids: list[str] = []

-        with ContextThreadPoolExecutor(max_workers=8) as executor:
+        with ContextThreadPoolExecutor(max_workers=20) as executor:
             futures = {executor.submit(self._process_memory, m): m for m in memories}
             for future in as_completed(futures, timeout=60):
                 try:
@@ -109,18 +109,28 @@ def _process_memory(self, memory: TextualMemoryItem):
         Process and add memory to different memory types
         (WorkingMemory, LongTermMemory, UserMemory).
         This method runs asynchronously to process each memory item.
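        The two writes below fan out onto a small executor, roughly like
        this sketch (simplified; `write_working` and `write_graph` are
        placeholder names for the private helpers, not real methods):

            with ContextThreadPoolExecutor(max_workers=2) as ex:
                futures = [ex.submit(write_working), ex.submit(write_graph)]
                ids = [f.result() for f in as_completed(futures)]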
""" - ids = [] - - # Add to WorkingMemory - self._add_memory_to_db(memory, "WorkingMemory") + ids: list[str] = [] + futures = [] + + with ContextThreadPoolExecutor(max_workers=2, thread_name_prefix="mem") as ex: + f_working = ex.submit(self._add_memory_to_db, memory, "WorkingMemory") + futures.append(f_working) + + if memory.metadata.memory_type in ("LongTermMemory", "UserMemory"): + f_graph = ex.submit( + self._add_to_graph_memory, + memory=memory, + memory_type=memory.metadata.memory_type, + ) + futures.append(f_graph) - # Add to LongTermMemory and UserMemory - if memory.metadata.memory_type in ["LongTermMemory", "UserMemory"]: - added_id = self._add_to_graph_memory( - memory=memory, - memory_type=memory.metadata.memory_type, - ) - ids.append(added_id) + for fut in as_completed(futures): + try: + res = fut.result() + if isinstance(res, str) and res: + ids.append(res) + except Exception: + logger.warning("Parallel memory processing failed:\n%s", traceback.format_exc()) return ids From 8d2263a1f8e1e5c65e5b9ddc7ea423878bcf90ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Mon, 20 Oct 2025 15:42:47 +0800 Subject: [PATCH 14/32] feat: fix mem-read scheduler --- src/memos/mem_reader/simple_struct.py | 49 ++++-- src/memos/mem_scheduler/base_scheduler.py | 143 ++++++++++++++---- .../mem_scheduler/general_modules/misc.py | 23 ++- src/memos/mem_scheduler/general_scheduler.py | 15 +- .../tree_text_memory/organize/manager.py | 1 - 5 files changed, 182 insertions(+), 49 deletions(-) diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index caf4c19df..5e2ffef59 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -264,9 +264,7 @@ def _build_fast_node(w): text = w["text"] roles = {s.get("role", "") for s in w["sources"] if s.get("role")} mem_type = "UserMemory" if roles == {"user"} else "LongTermMemory" - tags = ["mode:fast", f"lang:{detect_lang(text)}"] + [ - f"role:{r}" for r in sorted(roles) - ] + tags = ["mode:fast"] return self._make_memory_item( value=text, info=info, memory_type=mem_type, tags=tags, sources=w["sources"] ) @@ -544,19 +542,42 @@ def _process_doc_data(self, scene_data_info, info, **kwargs): def _process_transfer_doc_data(self, raw_node: TextualMemoryItem): raise NotImplementedError - def parse_json_result(self, response_text): + def parse_json_result(self, response_text: str) -> dict: + s = (response_text or "").strip() + + m = re.search(r"```(?:json)?\s*([\s\S]*?)```", s, flags=re.I) + s = (m.group(1) if m else s.replace("```", "")).strip() + + i = s.find("{") + if i == -1: + return {} + s = s[i:].strip() + + try: + return json.loads(s) + except json.JSONDecodeError: + pass + + j = max(s.rfind("}"), s.rfind("]")) + if j != -1: + try: + return json.loads(s[: j + 1]) + except json.JSONDecodeError: + pass + + def _cheap_close(t: str) -> str: + t += "}" * max(0, t.count("{") - t.count("}")) + t += "]" * max(0, t.count("[") - t.count("]")) + return t + + t = _cheap_close(s) try: - json_start = response_text.find("{") - response_text = response_text[json_start:] - response_text = response_text.replace("```", "").strip() - if not response_text.endswith("}"): - response_text += "}" - return json.loads(response_text) + return json.loads(t) except json.JSONDecodeError as e: - logger.error(f"[JSONParse] Failed to decode JSON: {e}\nRaw:\n{response_text}") - return {} - except Exception as e: - logger.error(f"[JSONParse] Unexpected error: {e}") + logger.error( + f"[JSONParse] 
Failed to decode JSON: {e}\nTail: Raw {response_text} \ + n{s[-400:]}" + ) return {} def transform_memreader(self, data: dict) -> list[TextualMemoryItem]: diff --git a/src/memos/mem_scheduler/base_scheduler.py b/src/memos/mem_scheduler/base_scheduler.py index 550aee1f6..ab657bdfa 100644 --- a/src/memos/mem_scheduler/base_scheduler.py +++ b/src/memos/mem_scheduler/base_scheduler.py @@ -643,50 +643,135 @@ def _cleanup_queues(self) -> None: except queue.Empty: pass - def mem_scheduler_wait(self, timeout: float = 180.0, poll: float = 0.1) -> bool: + def mem_scheduler_wait( + self, timeout: float = 180.0, poll: float = 0.1, log_every: float = 1.0 + ) -> bool: """ - Block until the scheduler has finished processing all submitted messages. - - Strategy: - 1) Wait for the internal memos_message_queue to drain - - Prefer "unfinished_tasks" if available; otherwise fallback to empty() polling. - 2) If parallel dispatch is enabled, wait for all dispatched futures to complete via dispatcher.join(). - - Args: - timeout: Maximum seconds to wait in total. - poll: Polling interval when falling back to checks. - Returns: - True if drained before timeout, otherwise False. + Uses EWMA throughput, detects leaked `unfinished_tasks`, and waits for dispatcher. """ - deadline = time.time() + timeout + deadline = time.monotonic() + timeout + + # --- helpers (local, no external deps) --- + def _unfinished() -> int: + """Prefer `unfinished_tasks`; fallback to `qsize()`.""" + try: + u = getattr(self.memos_message_queue, "unfinished_tasks", None) + if u is not None: + return int(u) + except Exception: + pass + try: + return int(self.memos_message_queue.qsize()) + except Exception: + return 0 + + def _fmt_eta(seconds: float | None) -> str: + """Format seconds to human-readable string.""" + if seconds is None or seconds != seconds or seconds == float("inf"): + return "unknown" + s = max(0, int(seconds)) + h, s = divmod(s, 3600) + m, s = divmod(s, 60) + if h > 0: + return f"{h:d}h{m:02d}m{s:02d}s" + if m > 0: + return f"{m:d}m{s:02d}s" + return f"{s:d}s" + + # --- EWMA throughput state (tasks/s) --- + alpha = 0.3 + rate = 0.0 + last_t = None # type: float | None + last_done = 0 + + # --- dynamic totals & stuck detection --- + init_unfinished = _unfinished() + done_total = 0 + last_unfinished = None + stuck_ticks = 0 + next_log = 0.0 - # 1) Wait for internal queue to drain while True: + # 1) read counters + curr_unfinished = _unfinished() try: - unfinished = getattr(self.memos_message_queue, "unfinished_tasks", None) - if unfinished is not None: - if int(unfinished) == 0: - break - else: - if self.memos_message_queue.empty(): - break + qsz = int(self.memos_message_queue.qsize()) except Exception: - # Be conservative: if any issue reading metrics, fallback to empty() - if self.memos_message_queue.empty(): - break + qsz = -1 + + pend = run = 0 + stats_fn = getattr(self.dispatcher, "stats", None) + if self.enable_parallel_dispatch and self.dispatcher is not None and callable(stats_fn): + try: + st = ( + stats_fn() + ) # expected: {'pending':int,'running':int,'done':int?,'rate':float?} + pend = int(st.get("pending", 0)) + run = int(st.get("running", 0)) + except Exception: + pass + + # 2) dynamic total (allows new tasks queued while waiting) + total_now = max(init_unfinished, done_total + curr_unfinished) + done_total = max(0, total_now - curr_unfinished) + + # 3) update EWMA throughput + now = time.monotonic() + if last_t is None: + last_t = now + else: + dt = max(1e-6, now - last_t) + dc = max(0, done_total - last_done) + 
inst = dc / dt + rate = inst if rate == 0.0 else alpha * inst + (1 - alpha) * rate + last_t = now + last_done = done_total + + eta = None if rate <= 1e-9 else (curr_unfinished / rate) + + # 4) progress log (throttled) + if now >= next_log: + print( + f"[mem_scheduler_wait] remaining≈{curr_unfinished} | throughput≈{rate:.2f} msg/s | ETA≈{_fmt_eta(eta)} " + f"| qsize={qsz} pending={pend} running={run}" + ) + next_log = now + max(0.2, log_every) + + # 5) exit / stuck detection + idle_dispatcher = ( + (pend == 0 and run == 0) + if (self.enable_parallel_dispatch and self.dispatcher is not None) + else True + ) + if curr_unfinished == 0: + break + if curr_unfinished > 0 and qsz == 0 and idle_dispatcher: + if last_unfinished == curr_unfinished: + stuck_ticks += 1 + else: + stuck_ticks = 0 + else: + stuck_ticks = 0 + last_unfinished = curr_unfinished + + if stuck_ticks >= 3: + logger.warning( + "mem_scheduler_wait: detected leaked 'unfinished_tasks' -> treating queue as drained" + ) + break - if time.time() >= deadline: + if now >= deadline: logger.warning("mem_scheduler_wait: queue did not drain before timeout") return False + time.sleep(poll) - # 2) Wait for dispatcher futures (if running in parallel mode) - remaining = max(0.0, deadline - time.time()) + # 6) wait dispatcher (second stage) + remaining = max(0.0, deadline - time.monotonic()) if self.enable_parallel_dispatch and self.dispatcher is not None: try: ok = self.dispatcher.join(timeout=remaining if remaining > 0 else 0) except TypeError: - # Some implementations may not accept timeout ok = self.dispatcher.join() if not ok: logger.warning("mem_scheduler_wait: dispatcher did not complete before timeout") diff --git a/src/memos/mem_scheduler/general_modules/misc.py b/src/memos/mem_scheduler/general_modules/misc.py index 3c7116b74..abc41ca36 100644 --- a/src/memos/mem_scheduler/general_modules/misc.py +++ b/src/memos/mem_scheduler/general_modules/misc.py @@ -173,7 +173,9 @@ def put(self, item: T, block: bool = False, timeout: float | None = None) -> Non """Put an item into the queue. If the queue is full, the oldest item will be automatically removed to make space. - This operation is thread-safe. + IMPORTANT: When we drop an item we also call `task_done()` to keep + the internal `unfinished_tasks` counter consistent (the dropped task + will never be processed). Args: item: The item to be put into the queue @@ -184,19 +186,34 @@ def put(self, item: T, block: bool = False, timeout: float | None = None) -> Non # First try non-blocking put super().put(item, block=block, timeout=timeout) except Full: + # Remove oldest item and mark it done to avoid leaking unfinished_tasks with suppress(Empty): - self.get_nowait() # Remove oldest item + _ = self.get_nowait() + # If the removed item had previously incremented unfinished_tasks, + # we must decrement here since it will never be processed. + with suppress(ValueError): + self.task_done() # Retry putting the new item super().put(item, block=block, timeout=timeout) def get_queue_content_without_pop(self) -> list[T]: """Return a copy of the queue's contents without modifying it.""" - return list(self.queue) + # Ensure a consistent snapshot by holding the mutex + with self.mutex: + return list(self.queue) def clear(self) -> None: """Remove all items from the queue. This operation is thread-safe. + IMPORTANT: We also decrement `unfinished_tasks` by the number of + items cleared, since those tasks will never be processed. 
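+
+        A sketch of the invariant this preserves (standard queue.Queue
+        join/task_done semantics; `q` is an instance of this class):
+
+            q.put("a"); q.put("b")   # unfinished_tasks == 2
+            q.clear()                # counter drops back to 0
+            q.join()                 # returns immediately instead of hanging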
""" with self.mutex: + dropped = len(self.queue) self.queue.clear() + # Call task_done() outside of the mutex to avoid deadlocks because + # Queue.task_done() acquires the same condition bound to `self.mutex`. + for _ in range(dropped): + with suppress(ValueError): + self.task_done() diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 13e92715d..688396c54 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -1,3 +1,4 @@ +import concurrent.futures import json import traceback @@ -222,7 +223,7 @@ def _add_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: logger.info(f"Messages {messages} assigned to {MEM_READ_LABEL} handler.") - for message in messages: + def process_message(message: ScheduleMessageItem): try: user_id = message.user_id mem_cube_id = message.mem_cube_id @@ -231,6 +232,8 @@ def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> Non # Parse the memory IDs from content mem_ids = json.loads(content) if isinstance(content, str) else content + if not mem_ids: + return logger.info( f"Processing mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}, mem_ids={mem_ids}" @@ -240,7 +243,7 @@ def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> Non text_mem = mem_cube.text_mem if not isinstance(text_mem, TreeTextMemory): logger.error(f"Expected TreeTextMemory but got {type(text_mem).__name__}") - continue + return # Use mem_reader to process the memories self._process_memories_with_reader( @@ -258,6 +261,14 @@ def _mem_read_message_consumer(self, messages: list[ScheduleMessageItem]) -> Non except Exception as e: logger.error(f"Error processing mem_read message: {e}", exc_info=True) + with concurrent.futures.ThreadPoolExecutor(max_workers=min(8, len(messages))) as executor: + futures = [executor.submit(process_message, msg) for msg in messages] + for future in concurrent.futures.as_completed(futures): + try: + future.result() + except Exception as e: + logger.error(f"Thread task failed: {e}", exc_info=True) + def _process_memories_with_reader( self, mem_ids: list[str], diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index 0abefa19e..49d01e841 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -146,7 +146,6 @@ def _add_memory_to_db(self, memory: TextualMemoryItem, memory_type: str) -> str: # Insert node into graph self.graph_store.add_node(working_memory.id, working_memory.memory, metadata) - return working_memory.id def _add_to_graph_memory(self, memory: TextualMemoryItem, memory_type: str): """ From e250ab801e4e415a5e8aa06ea771a7eb243ae160 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Mon, 20 Oct 2025 16:07:54 +0800 Subject: [PATCH 15/32] fix: nebula group-by bug --- src/memos/graph_dbs/nebular.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/memos/graph_dbs/nebular.py b/src/memos/graph_dbs/nebular.py index 38f08ff8d..3eed41b69 100644 --- a/src/memos/graph_dbs/nebular.py +++ b/src/memos/graph_dbs/nebular.py @@ -1144,10 +1144,9 @@ def get_grouped_counts( group_by_fields.append(alias) # Full GQL query construction gql = f""" - MATCH (n) + MATCH (n@Memory /*+ 
INDEX(idx_memory_user_name) */) {where_clause} RETURN {", ".join(return_fields)}, COUNT(n) AS count - GROUP BY {", ".join(group_by_fields)} """ result = self.execute_query(gql) # Pure GQL string execution From 14e986e893c90320240da4765c819f7042d9e2a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Mon, 20 Oct 2025 16:25:16 +0800 Subject: [PATCH 16/32] fix: bug in adding mem scheduler --- src/memos/mem_scheduler/general_scheduler.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 688396c54..e3fe5a7de 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -201,7 +201,15 @@ def _add_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: mem_cube = msg.mem_cube for memory_id in userinput_memory_ids: - mem_item: TextualMemoryItem = mem_cube.text_mem.get(memory_id=memory_id) + try: + mem_item: TextualMemoryItem = mem_cube.text_mem.get( + memory_id=memory_id + ) + except Exception: + logger.warning( + f"This MemoryItem {memory_id} has already been deleted." + ) + continue mem_type = mem_item.metadata.memory_type mem_content = mem_item.memory From 31adec0f96246bdeeca692025d4c1d5520d87284 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 21 Oct 2025 11:24:10 +0800 Subject: [PATCH 17/32] fix: nebula index; mem-reader chat-time; --- src/memos/graph_dbs/nebular.py | 13 ++++++++++--- src/memos/mem_os/core.py | 2 +- src/memos/mem_reader/simple_struct.py | 15 ++++++++------- src/memos/mem_scheduler/base_scheduler.py | 2 +- src/memos/mem_scheduler/general_scheduler.py | 12 ++++++++---- 5 files changed, 28 insertions(+), 16 deletions(-) diff --git a/src/memos/graph_dbs/nebular.py b/src/memos/graph_dbs/nebular.py index 3eed41b69..96151b441 100644 --- a/src/memos/graph_dbs/nebular.py +++ b/src/memos/graph_dbs/nebular.py @@ -445,7 +445,7 @@ def remove_oldest_memory(self, memory_type: str, keep_latest: int) -> None: count = self.count_nodes(memory_type) if count > keep_latest: delete_query = f""" - MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */) + MATCH (n@Memory /*+ INDEX(idx_memory_user_name_memory_type) */) WHERE n.memory_type = '{memory_type}' {optional_condition} ORDER BY n.updated_at DESC @@ -605,7 +605,7 @@ def get_memory_count(self, memory_type: str) -> int: @timed def count_nodes(self, scope: str | None = None) -> int: - query = "MATCH (n@Memory)" + query = "MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */)" conditions = [] if scope: @@ -1584,7 +1584,14 @@ def _create_basic_property_indexes(self) -> None: Create standard B-tree indexes on user_name when use Shared Database Multi-Tenant Mode. """ - fields = ["status", "memory_type", "created_at", "updated_at", "user_name"] + fields = [ + "status", + "memory_type", + "created_at", + "updated_at", + "user_name", + "user_name_memory_type", + ] for field in fields: index_name = f"idx_memory_{field}" diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index efb2d0622..71f85fc86 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -691,7 +691,7 @@ def add( sync_mode = self.mem_cubes[mem_cube_id].text_mem.mode if sync_mode == "async": assert self.mem_scheduler is not None, ( - "Mem-Scheduler must be working when use synchronous memory adding." + "Mem-Scheduler must be working when use asynchronous memory adding." 
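+                # the async path defers fine-grained extraction to the
+                # scheduler, so a running scheduler is a hard prerequisite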
) logger.debug(f"Mem-reader mode is: {sync_mode}") time_start_1 = time.time() diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index 5e2ffef59..9ec8ca166 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -157,7 +157,7 @@ def __init__(self, config: SimpleStructMemReaderConfig): self.chunker = ChunkerFactory.from_config(config.chunker) self.memory_max_length = 8000 # Use token-based windowing; default to ~5000 tokens if not configured - self.chat_window_max_tokens = getattr(self.config, "chat_window_max_tokens", 5000) + self.chat_window_max_tokens = getattr(self.config, "chat_window_max_tokens", 1024) self._count_tokens = _count_tokens_text def _make_memory_item( @@ -229,11 +229,12 @@ def _iter_chat_windows(self, scene_data_info, max_tokens=None, overlap=200): role = item.get("role", "") content = item.get("content", "") chat_time = item.get("chat_time", None) - prefix = ( - f"{role}: " - if (role and role != "mix") - else (f"[{chat_time}]: " if chat_time else "") - ) + parts = [] + if role and str(role).lower() != "mix": + parts.append(f"{role}: ") + if chat_time: + parts.append(f"[{chat_time}]: ") + prefix = "".join(parts) line = f"{prefix}{content}\n" if self._count_tokens(cur_text + line) > max_tokens and cur_text: @@ -576,7 +577,7 @@ def _cheap_close(t: str) -> str: except json.JSONDecodeError as e: logger.error( f"[JSONParse] Failed to decode JSON: {e}\nTail: Raw {response_text} \ - n{s[-400:]}" + json: {s}" ) return {} diff --git a/src/memos/mem_scheduler/base_scheduler.py b/src/memos/mem_scheduler/base_scheduler.py index ab657bdfa..9ec976405 100644 --- a/src/memos/mem_scheduler/base_scheduler.py +++ b/src/memos/mem_scheduler/base_scheduler.py @@ -644,7 +644,7 @@ def _cleanup_queues(self) -> None: pass def mem_scheduler_wait( - self, timeout: float = 180.0, poll: float = 0.1, log_every: float = 1.0 + self, timeout: float = 180.0, poll: float = 0.1, log_every: float = 0.01 ) -> bool: """ Uses EWMA throughput, detects leaked `unfinished_tasks`, and waits for dispatcher. 
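
The ETA reported by this loop comes from an exponentially weighted moving average of completion throughput, rate = alpha * inst + (1 - alpha) * rate. A stand-alone sketch of the estimator (stdlib only; the sample stream below is invented for illustration):

    # EWMA throughput -> ETA, as computed inside mem_scheduler_wait above
    alpha = 0.3
    rate = 0.0
    for done_delta, dt in [(5, 1.0), (3, 1.0), (8, 1.0)]:  # (tasks done, seconds)
        inst = done_delta / max(1e-6, dt)  # instantaneous rate
        rate = inst if rate == 0.0 else alpha * inst + (1 - alpha) * rate

    remaining = 20
    eta = None if rate <= 1e-9 else remaining / rate
    print(f"throughput≈{rate:.2f} msg/s | ETA≈{eta:.1f}s")
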
diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py
index e3fe5a7de..5c6152b32 100644
--- a/src/memos/mem_scheduler/general_scheduler.py
+++ b/src/memos/mem_scheduler/general_scheduler.py
@@ -321,10 +321,14 @@ def _process_memories_with_reader(
         logger.info(f"Processing {len(memory_items)} memories with mem_reader")

         # Extract memories using mem_reader
-        processed_memories = self.mem_reader.fine_transfer_simple_mem(
-            memory_items,
-            type="chat",
-        )
+        try:
+            processed_memories = self.mem_reader.fine_transfer_simple_mem(
+                memory_items,
+                type="chat",
+            )
+        except Exception as e:
+            logger.warning(f"{e}: Failed to transfer memories: {memory_items}")
+            processed_memories = []

         if processed_memories and len(processed_memories) > 0:
             # Flatten the results (mem_reader returns list of lists)

From 18f3cc8b49168d626e7b8773bd174689c99ab166 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Tue, 21 Oct 2025 11:36:10 +0800
Subject: [PATCH 18/32] format: searcher

---
 .../memories/textual/tree_text_memory/retrieve/searcher.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
index 9263c34dc..96c6c97f1 100644
--- a/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
+++ b/src/memos/memories/textual/tree_text_memory/retrieve/searcher.py
@@ -134,7 +134,11 @@ def _parse_task(
         related_nodes = [
             self.graph_store.get_node(n["id"])
             for n in self.graph_store.search_by_embedding(
-                query_embedding, top_k=top_k, status="activated", search_filter=search_filter, user_name=user_name
+                query_embedding,
+                top_k=top_k,
+                status="activated",
+                search_filter=search_filter,
+                user_name=user_name,
             )
         ]
         memories = []

From 4e0133e57c7fdb0e55c8c633a81e80eacad379f2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?=
Date: Tue, 21 Oct 2025 16:13:31 +0800
Subject: [PATCH 19/32] fix: some bugs in scheduler and mem-reader

---
 src/memos/graph_dbs/nebular.py               |  3 +--
 src/memos/mem_reader/simple_struct.py        |  3 +++
 src/memos/mem_scheduler/general_scheduler.py | 22 ++--------------
 src/memos/memories/textual/tree.py           |  2 +-
 .../tree_text_memory/organize/manager.py     | 25 +++++++++++--------
 5 files changed, 21 insertions(+), 34 deletions(-)

diff --git a/src/memos/graph_dbs/nebular.py b/src/memos/graph_dbs/nebular.py
index 9f0b38635..12b493e58 100644
--- a/src/memos/graph_dbs/nebular.py
+++ b/src/memos/graph_dbs/nebular.py
@@ -446,7 +446,7 @@ def remove_oldest_memory(
         count = self.count_nodes(memory_type, user_name)
         if count > keep_latest:
             delete_query = f"""
-            MATCH (n@Memory /*+ INDEX(idx_memory_user_name_memory_type) */)
+            MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */)
             WHERE n.memory_type = '{memory_type}'
             {optional_condition}
             ORDER BY n.updated_at DESC
@@ -1627,7 +1627,6 @@ def _create_basic_property_indexes(self) -> None:
             "created_at",
             "updated_at",
             "user_name",
-            "user_name_memory_type",
         ]

         for field in fields:
diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py
index 9ec8ca166..9f5eb9832 100644
--- a/src/memos/mem_reader/simple_struct.py
+++ b/src/memos/mem_reader/simple_struct.py
@@ -575,6 +575,9 @@ def _cheap_close(t: str) -> str:
         try:
             return json.loads(t)
         except json.JSONDecodeError as e:
+            if "Invalid \\escape" in str(e):
+                s = s.replace("\\", "\\\\")
+                return json.loads(s)
             logger.error(
                 f"[JSONParse] Failed to decode JSON: {e}\nTail: Raw
{response_text} \ json: {s}" diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index e1c940290..e25858dc7 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -184,7 +184,7 @@ def _answer_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: def _add_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: logger.info(f"Messages {messages} assigned to {ADD_LABEL} handler.") # Process the query in a session turn - grouped_messages = self.dispatcher.group_messages_by_user_and_cube(messages=messages) + grouped_messages = self.dispatcher._group_messages_by_user_and_mem_cube(messages=messages) self.validate_schedule_messages(messages=messages, label=ADD_LABEL) try: @@ -364,6 +364,7 @@ def _process_memories_with_reader( logger.info("Delete raw mem_ids") text_mem.memory_manager.remove_and_refresh_memory() logger.info("Remove and Refresh Memories") + logger.debug(f"Finished add {user_id} memory: {mem_ids}") except Exception: logger.error( @@ -384,25 +385,6 @@ def _trigger_memory_reorganization( user_id: User ID mem_cube_id: Memory cube ID """ - try: - # Check if reorganization is enabled - if hasattr(text_mem, "is_reorganize") and text_mem.is_reorganize: - logger.info( - f"Triggering memory reorganization for user_id={user_id}, mem_cube_id={mem_cube_id}" - ) - - # Get current working memory size - current_sizes = text_mem.get_current_memory_size() - logger.info(f"Current memory sizes: {current_sizes}") - - # The reorganization will be handled by the memory manager - # This is just a trigger point for logging and monitoring - logger.info("Memory reorganization triggered successfully") - else: - logger.info("Memory reorganization is disabled, skipping reorganization trigger") - - except Exception as e: - logger.error(f"Error in _trigger_memory_reorganization: {e}", exc_info=True) def process_session_turn( self, diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index ea087eac9..23244d98e 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -89,7 +89,7 @@ def add(self, memories: list[TextualMemoryItem | dict[str, Any]]) -> list[str]: Args: memories: List of TextualMemoryItem objects or dictionaries to add. """ - return self.memory_manager.add(memories) + return self.memory_manager.add(memories, mode=self.mode) def replace_working_memory(self, memories: list[TextualMemoryItem]) -> None: self.memory_manager.replace_working_memory(memories) diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index d014dcd47..54776134b 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -52,7 +52,9 @@ def __init__( ) self._merged_threshold = merged_threshold - def add(self, memories: list[TextualMemoryItem], user_name: str | None = None) -> list[str]: + def add( + self, memories: list[TextualMemoryItem], user_name: str | None = None, mode: str = "sync" + ) -> list[str]: """ Add new memories in parallel to different memory types. 
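        In "sync" mode the per-type size caps are enforced inline right
        after the writes; in "async" mode that trimming is deferred to the
        scheduler. A minimal call sketch (names assumed for illustration):

            ids = manager.add(items, user_name="nate_test", mode="sync")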
""" @@ -67,17 +69,18 @@ def add(self, memories: list[TextualMemoryItem], user_name: str | None = None) - except Exception as e: logger.exception("Memory processing error: ", exc_info=e) - for mem_type in ["WorkingMemory", "LongTermMemory", "UserMemory"]: - try: - self.graph_store.remove_oldest_memory( - memory_type="WorkingMemory", - keep_latest=self.memory_size[mem_type], - user_name=user_name, - ) - except Exception: - logger.warning(f"Remove {mem_type} error: {traceback.format_exc()}") + if mode == "sync": + for mem_type in ["WorkingMemory", "LongTermMemory", "UserMemory"]: + try: + self.graph_store.remove_oldest_memory( + memory_type="WorkingMemory", + keep_latest=self.memory_size[mem_type], + user_name=user_name, + ) + except Exception: + logger.warning(f"Remove {mem_type} error: {traceback.format_exc()}") - self._refresh_memory_size(user_name=user_name) + self._refresh_memory_size(user_name=user_name) return added_ids def replace_working_memory( From 6653beafc55c8d8dd9ccd1c71735e33d32d66226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 21 Oct 2025 16:57:36 +0800 Subject: [PATCH 20/32] feat: add mem-organize in scheduler --- src/memos/mem_scheduler/general_scheduler.py | 100 ++++++++++++++++-- .../mem_scheduler/schemas/general_schemas.py | 1 + 2 files changed, 92 insertions(+), 9 deletions(-) diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index e25858dc7..f47cc0cc5 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -10,6 +10,7 @@ ADD_LABEL, ANSWER_LABEL, DEFAULT_MAX_QUERY_KEY_WORDS, + MEM_ORGANIZE_LABEL, MEM_READ_LABEL, QUERY_LABEL, WORKING_MEMORY_TYPE, @@ -38,6 +39,7 @@ def __init__(self, config: GeneralSchedulerConfig): ANSWER_LABEL: self._answer_message_consumer, ADD_LABEL: self._add_message_consumer, MEM_READ_LABEL: self._mem_read_message_consumer, + MEM_ORGANIZE_LABEL: self._mem_reorganize_message_consumer, } self.dispatcher.register_handlers(handlers) @@ -350,11 +352,6 @@ def _process_memories_with_reader( logger.info( f"Added {len(enhanced_mem_ids)} enhanced memories: {enhanced_mem_ids}" ) - - # Trigger memory reorganization if needed - self._trigger_memory_reorganization( - text_mem=text_mem, user_id=user_id, mem_cube_id=mem_cube_id - ) else: logger.info("No enhanced memories generated by mem_reader") else: @@ -371,20 +368,105 @@ def _process_memories_with_reader( f"Error in _process_memories_with_reader: {traceback.format_exc()}", exc_info=True ) - def _trigger_memory_reorganization( + def _mem_reorganize_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: + logger.info(f"Messages {messages} assigned to {MEM_READ_LABEL} handler.") + + def process_message(message: ScheduleMessageItem): + try: + user_id = message.user_id + mem_cube_id = message.mem_cube_id + mem_cube = message.mem_cube + content = message.content + + # Parse the memory IDs from content + mem_ids = json.loads(content) if isinstance(content, str) else content + if not mem_ids: + return + + logger.info( + f"Processing mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}, mem_ids={mem_ids}" + ) + + # Get the text memory from the mem_cube + text_mem = mem_cube.text_mem + if not isinstance(text_mem, TreeTextMemory): + logger.error(f"Expected TreeTextMemory but got {type(text_mem).__name__}") + return + + # Use mem_reader to process the memories + self._process_memories_with_reorganize( + mem_ids=mem_ids, + user_id=user_id, + 
mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + text_mem=text_mem, + ) + + logger.info( + f"Successfully processed mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}" + ) + + except Exception as e: + logger.error(f"Error processing mem_read message: {e}", exc_info=True) + + with concurrent.futures.ThreadPoolExecutor(max_workers=min(8, len(messages))) as executor: + futures = [executor.submit(process_message, msg) for msg in messages] + for future in concurrent.futures.as_completed(futures): + try: + future.result() + except Exception as e: + logger.error(f"Thread task failed: {e}", exc_info=True) + + def _process_memories_with_reorganize( self, - text_mem: TreeTextMemory, + mem_ids: list[str], user_id: str, mem_cube_id: str, + mem_cube: GeneralMemCube, + text_mem: TreeTextMemory, ) -> None: """ - Trigger memory reorganization after enhanced processing. + Process memories using mem_reorganize for enhanced memory processing. Args: - text_mem: Text memory instance + mem_ids: List of memory IDs to process user_id: User ID mem_cube_id: Memory cube ID + mem_cube: Memory cube instance + text_mem: Text memory instance """ + try: + # Get the mem_reader from the parent MOSCore + if not hasattr(self, "mem_reader") or self.mem_reader is None: + logger.warning( + "mem_reader not available in scheduler, skipping enhanced processing" + ) + return + + # Get the original memory items + memory_items = [] + for mem_id in mem_ids: + try: + memory_item = text_mem.get(mem_id) + memory_items.append(memory_item) + except Exception as e: + logger.warning(f"Failed to get memory {mem_id}: {e}") + continue + + if not memory_items: + logger.warning("No valid memory items found for processing") + return + + # Use mem_reader to process the memories + logger.info(f"Processing {len(memory_items)} memories with mem_reader") + text_mem.memory_manager.remove_and_refresh_memory() + logger.info("Remove and Refresh Memories") + logger.debug(f"Finished add {user_id} memory: {mem_ids}") + + except Exception: + logger.error( + f"Error in _process_memories_with_reader: {traceback.format_exc()}", exc_info=True + ) def process_session_turn( self, diff --git a/src/memos/mem_scheduler/schemas/general_schemas.py b/src/memos/mem_scheduler/schemas/general_schemas.py index 6599a012e..248c42e80 100644 --- a/src/memos/mem_scheduler/schemas/general_schemas.py +++ b/src/memos/mem_scheduler/schemas/general_schemas.py @@ -9,6 +9,7 @@ ANSWER_LABEL = "answer" ADD_LABEL = "add" MEM_READ_LABEL = "mem_read" +MEM_ORGANIZE_LABEL = "mem_organize" TreeTextMemory_SEARCH_METHOD = "tree_text_memory_search" TreeTextMemory_FINE_SEARCH_METHOD = "tree_text_memory_fine_search" From af5c940abbb737c91288b5ea60578a49dfbf02af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 21 Oct 2025 20:16:52 +0800 Subject: [PATCH 21/32] feat: add tree.mode to config; modify scheduler config --- src/memos/configs/mem_scheduler.py | 2 -- src/memos/configs/memory.py | 5 +++++ src/memos/mem_scheduler/base_scheduler.py | 2 +- src/memos/memories/textual/tree.py | 5 +---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/memos/configs/mem_scheduler.py b/src/memos/configs/mem_scheduler.py index 39586081c..2d6155ec2 100644 --- a/src/memos/configs/mem_scheduler.py +++ b/src/memos/configs/mem_scheduler.py @@ -28,13 +28,11 @@ class BaseSchedulerConfig(BaseConfig): thread_pool_max_workers: int = Field( default=DEFAULT_THREAD_POOL_MAX_WORKERS, gt=1, - lt=20, description=f"Maximum worker threads in pool (default: 
{DEFAULT_THREAD_POOL_MAX_WORKERS})", ) consume_interval_seconds: float = Field( default=DEFAULT_CONSUME_INTERVAL_SECONDS, gt=0, - le=60, description=f"Interval for consuming messages from queue in seconds (default: {DEFAULT_CONSUME_INTERVAL_SECONDS})", ) auth_config_path: str | None = Field( diff --git a/src/memos/configs/memory.py b/src/memos/configs/memory.py index 237450e15..2c3a715f7 100644 --- a/src/memos/configs/memory.py +++ b/src/memos/configs/memory.py @@ -179,6 +179,11 @@ class TreeTextMemoryConfig(BaseTextMemoryConfig): ), ) + mode: str | None = Field( + default="sync", + description=("whether use asynchronous mode in memory add"), + ) + class SimpleTreeTextMemoryConfig(TreeTextMemoryConfig): """Simple tree text memory configuration class.""" diff --git a/src/memos/mem_scheduler/base_scheduler.py b/src/memos/mem_scheduler/base_scheduler.py index 4950f87bb..1e8b042b1 100644 --- a/src/memos/mem_scheduler/base_scheduler.py +++ b/src/memos/mem_scheduler/base_scheduler.py @@ -88,7 +88,7 @@ def __init__(self, config: BaseSchedulerConfig): # internal message queue self.max_internal_message_queue_size = self.config.get( - "max_internal_message_queue_size", 100 + "max_internal_message_queue_size", 10000 ) self.memos_message_queue: Queue[ScheduleMessageItem] = Queue( maxsize=self.max_internal_message_queue_size diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index 23244d98e..74a52eb8e 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -30,13 +30,10 @@ class TreeTextMemory(BaseTextMemory): """General textual memory implementation for storing and retrieving memories.""" - # Override the default mode to async for TreeTextMemory - mode: str = "async" - def __init__(self, config: TreeTextMemoryConfig): """Initialize memory with the given configuration.""" # Set mode from class default or override if needed - self.mode = getattr(self.__class__, "mode", "async") + self.mode = config.mode self.config: TreeTextMemoryConfig = config self.extractor_llm: OpenAILLM | OllamaLLM | AzureLLM = LLMFactory.from_config( config.extractor_llm From 28a20e98060c86ab76161ef86398896d80164e9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 21 Oct 2025 20:40:26 +0800 Subject: [PATCH 22/32] fix: test bug --- src/memos/memories/textual/tree.py | 2 +- tests/memories/textual/test_tree.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/memos/memories/textual/tree.py b/src/memos/memories/textual/tree.py index 74a52eb8e..fccd83fa6 100644 --- a/src/memos/memories/textual/tree.py +++ b/src/memos/memories/textual/tree.py @@ -81,7 +81,7 @@ def __init__(self, config: TreeTextMemoryConfig): else: logger.info("No internet retriever configured") - def add(self, memories: list[TextualMemoryItem | dict[str, Any]]) -> list[str]: + def add(self, memories: list[TextualMemoryItem | dict[str, Any]], **kwargs) -> list[str]: """Add memories. Args: memories: List of TextualMemoryItem objects or dictionaries to add. 
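
The change above, end to end, reduces to a small standalone sketch (the classes below are stand-ins for TreeTextMemory and MemoryManager, not the real implementations):

    # Sketch: a configured mode is pinned when add() delegates to the manager.
    class StubManager:
        def add(self, memories, user_name=None, mode="sync"):
            # In the real manager, mode == "sync" also trims the oldest
            # WorkingMemory entries inline; "async" defers that cleanup
            # to the scheduler.
            return [f"id-{i}" for i, _ in enumerate(memories)]

    class StubTreeTextMemory:
        def __init__(self, mode="sync"):
            self.mode = mode                      # mirrors config.mode in this patch
            self.memory_manager = StubManager()

        def add(self, memories, **kwargs):
            # Extra kwargs are tolerated for API compatibility, but mode
            # always comes from the configured value, as in tree.py above.
            return self.memory_manager.add(memories, mode=self.mode)

    assert StubTreeTextMemory(mode="async").add(["m1", "m2"]) == ["id-0", "id-1"]

This is also what the updated tests below pin down: the manager must be called with a mode matching the configured value.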
diff --git a/tests/memories/textual/test_tree.py b/tests/memories/textual/test_tree.py index f3e662992..772a79d78 100644 --- a/tests/memories/textual/test_tree.py +++ b/tests/memories/textual/test_tree.py @@ -66,7 +66,7 @@ def test_add_calls_manager(mock_tree_text_memory): metadata=TreeNodeTextualMemoryMetadata(updated_at=None), ) mock_tree_text_memory.add([mock_item]) - mock_tree_text_memory.memory_manager.add.assert_called_once() + mock_tree_text_memory.memory_manager.add.assert_called_once_with([mock_item], mode="sync") def test_get_working_memory_sorted(mock_tree_text_memory): @@ -161,4 +161,4 @@ def test_add_returns_ids(mock_tree_text_memory): result = mock_tree_text_memory.add(mock_items) assert result == dummy_ids - mock_tree_text_memory.memory_manager.add.assert_called_once_with(mock_items) + mock_tree_text_memory.memory_manager.add.assert_called_once_with(mock_items, mode="sync") From d00a55377ceaf194937450a2f6e6de9ea1998bd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Wed, 22 Oct 2025 17:56:07 +0800 Subject: [PATCH 23/32] feat: add organize handler and submit reorganize scheduler --- src/memos/mem_os/core.py | 12 ++++++++ src/memos/mem_scheduler/general_scheduler.py | 30 ++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index 0010897c0..11095accd 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -17,6 +17,7 @@ from memos.mem_scheduler.schemas.general_schemas import ( ADD_LABEL, ANSWER_LABEL, + MEM_ORGANIZE_LABEL, MEM_READ_LABEL, QUERY_LABEL, ) @@ -740,6 +741,17 @@ def add( ) self.mem_scheduler.submit_messages(messages=[message_item]) + elif sync_mode == "sync": + message_item = ScheduleMessageItem( + user_id=user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + label=MEM_ORGANIZE_LABEL, + content=json.dumps(mem_ids), + timestamp=datetime.utcnow(), + ) + self.mem_scheduler.submit_messages(messages=[message_item]) + message_item = ScheduleMessageItem( user_id=target_user_id, mem_cube_id=mem_cube_id, diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index f47cc0cc5..6111c916b 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -2,6 +2,8 @@ import json import traceback +from datetime import datetime + from memos.configs.mem_scheduler import GeneralSchedulerConfig from memos.log import get_logger from memos.mem_cube.general import GeneralMemCube @@ -43,6 +45,11 @@ def __init__(self, config: GeneralSchedulerConfig): } self.dispatcher.register_handlers(handlers) + # Lazy-initialize reorganize state only if organize handler is enabled + if handlers.get(MEM_ORGANIZE_LABEL): + self._reorg_state = {} + self._reorg_locks = {} + def long_memory_update_process( self, user_id: str, mem_cube_id: str, messages: list[ScheduleMessageItem] ): @@ -247,7 +254,7 @@ def process_message(message: ScheduleMessageItem): content = message.content # Parse the memory IDs from content - mem_ids = json.loads(content) if isinstance(content, str) else content + mem_ids = json.loads(content) if not mem_ids: return @@ -352,6 +359,25 @@ def _process_memories_with_reader( logger.info( f"Added {len(enhanced_mem_ids)} enhanced memories: {enhanced_mem_ids}" ) + # Trigger organize only when we really added new nodes + try: + if "handlers" in dir(self.dispatcher) and MEM_ORGANIZE_LABEL not in getattr( + self.dispatcher, "handlers", {} + ): + # Dispatcher exists but 
organize not enabled; skip enqueue. + pass + else: + message_item = ScheduleMessageItem( + user_id=user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + label=MEM_ORGANIZE_LABEL, + content=json.dumps(enhanced_mem_ids), + timestamp=datetime.utcnow(), + ) + self.submit_messages(messages=[message_item]) + except Exception as e: + logger.error(f"Failed to enqueue MEM_ORGANIZE task: {e}", exc_info=True) else: logger.info("No enhanced memories generated by mem_reader") else: @@ -369,7 +395,7 @@ def _process_memories_with_reader( ) def _mem_reorganize_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: - logger.info(f"Messages {messages} assigned to {MEM_READ_LABEL} handler.") + logger.info(f"Messages {messages} assigned to {MEM_ORGANIZE_LABEL} handler.") def process_message(message: ScheduleMessageItem): try: From 1f735c541dfa65374b03e92eee17c59ad35a1cfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Wed, 22 Oct 2025 21:02:29 +0800 Subject: [PATCH 24/32] feat: move all async organization modules in scheduler; add user_name in all reorganize class so that reorganization module could be used in singleton; Remove useless function in manager and related test scripts; --- src/memos/mem_scheduler/general_scheduler.py | 121 ++++-- .../tree_text_memory/organize/manager.py | 109 +---- .../tree_text_memory/organize/reorganizer.py | 381 ++++++------------ tests/memories/textual/test_tree_manager.py | 31 -- 4 files changed, 210 insertions(+), 432 deletions(-) diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 6111c916b..1505be79b 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -1,5 +1,6 @@ import concurrent.futures import json +import threading import traceback from datetime import datetime @@ -397,51 +398,107 @@ def _process_memories_with_reader( def _mem_reorganize_message_consumer(self, messages: list[ScheduleMessageItem]) -> None: logger.info(f"Messages {messages} assigned to {MEM_ORGANIZE_LABEL} handler.") - def process_message(message: ScheduleMessageItem): + # Group by cube; we only trigger once per cube per batch. + grouped_by_cube: dict[str, GeneralMemCube] = {} + for msg in messages: try: - user_id = message.user_id - mem_cube_id = message.mem_cube_id - mem_cube = message.mem_cube - content = message.content + if msg.mem_cube_id and msg.mem_cube: + grouped_by_cube[msg.mem_cube_id] = msg.mem_cube + except Exception: + continue - # Parse the memory IDs from content - mem_ids = json.loads(content) if isinstance(content, str) else content - if not mem_ids: - return + if not grouped_by_cube: + logger.debug("[Reorganize] No valid mem_cube in messages; skip.") + return - logger.info( - f"Processing mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}, mem_ids={mem_ids}" + # Fire reorganize in parallel across different cubes; each cube is single-flight via lock. 
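+        # The pattern, reduced to a standalone toy (names here are illustrative,
+        # not the scheduler API):
+        #
+        #     locks: dict[str, threading.Lock] = {}
+        #
+        #     def run_single_flight(key: str, work) -> None:
+        #         lock = locks.setdefault(key, threading.Lock())
+        #         if not lock.acquire(blocking=False):
+        #             return  # a concurrent duplicate trigger is dropped, not queued
+        #         try:
+        #             work()
+        #         finally:
+        #             lock.release()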
+ with concurrent.futures.ThreadPoolExecutor( + max_workers=min(8, len(grouped_by_cube)) + ) as executor: + futures = [] + for mem_cube_id, mem_cube in grouped_by_cube.items(): + futures.append( + executor.submit(self._run_reorganize_singleflight, mem_cube, mem_cube_id, None) ) - # Get the text memory from the mem_cube - text_mem = mem_cube.text_mem - if not isinstance(text_mem, TreeTextMemory): - logger.error(f"Expected TreeTextMemory but got {type(text_mem).__name__}") - return + for f in concurrent.futures.as_completed(futures): + try: + f.result() + except Exception as e: + logger.error(f"[Reorganize] Task failed: {e}", exc_info=True) - # Use mem_reader to process the memories - self._process_memories_with_reorganize( - mem_ids=mem_ids, - user_id=user_id, - mem_cube_id=mem_cube_id, - mem_cube=mem_cube, - text_mem=text_mem, + def _get_reorg_lock(self, mem_cube_id: str) -> threading.Lock: + """ + Return a per-cube lock; lazily create it only when MEM_ORGANIZE is enabled. + """ + # If organize handler is disabled, this dict may not exist; guard it. + if not hasattr(self, "_reorg_locks"): + self._reorg_locks = {} + lock = self._reorg_locks.get(mem_cube_id) + if lock is None: + lock = threading.Lock() + self._reorg_locks[mem_cube_id] = lock + return lock + + def _run_reorganize_singleflight( + self, + mem_cube: GeneralMemCube, + mem_cube_id: str, + scopes: list[str] | None = None, + ) -> None: + """ + Run one reorganize pass for a mem_cube ensuring single-flight per cube. + If `scopes` is None, run both LongTermMemory and UserMemory (safe default). + """ + lock = self._get_reorg_lock(mem_cube_id) + if not lock.acquire(blocking=False): + logger.info( + f"[Reorganize] Another task is already running for mem_cube_id={mem_cube_id}; skipping this trigger." + ) + return + + try: + text_mem = mem_cube.text_mem + if not isinstance(text_mem, TreeTextMemory): + logger.error( + f"[Reorganize] Expected TreeTextMemory but got {type(text_mem).__name__} for mem_cube_id={mem_cube_id}" ) + return - logger.info( - f"Successfully processed mem_read for user_id={user_id}, mem_cube_id={mem_cube_id}" + # Fetch reorganizer from the attached memory manager + reorganizer = text_mem.memory_manager.reorganizer + if not reorganizer or not getattr(reorganizer, "is_reorganize", True): + logger.debug( + f"[Reorganize] Reorganizer disabled or missing for mem_cube_id={mem_cube_id}; skip." ) + return - except Exception as e: - logger.error(f"Error processing mem_read message: {e}", exc_info=True) + # Optional: also respect internal optimizing flags if present + try: + if any(getattr(reorganizer, "_is_optimizing", {}).values()): + logger.debug( + f"[Reorganize] Reorganizer busy (internal flag) for mem_cube_id={mem_cube_id}; skip." + ) + return + except Exception: + # If structure differs, just proceed; locking still guarantees single-flight per cube. 
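+                # (_is_optimizing is probed with getattr and a {} default, so
+                # reorganizer variants that never define the flag dict simply
+                # fall through here.)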
+ pass - with concurrent.futures.ThreadPoolExecutor(max_workers=min(8, len(messages))) as executor: - futures = [executor.submit(process_message, msg) for msg in messages] - for future in concurrent.futures.as_completed(futures): + run_scopes = scopes or ["LongTermMemory", "UserMemory"] + for scope in run_scopes: try: - future.result() + logger.info( + f"[Reorganize] Start optimize_structure(scope={scope}) for mem_cube_id={mem_cube_id}" + ) + reorganizer.optimize_structure(scope=scope) except Exception as e: - logger.error(f"Thread task failed: {e}", exc_info=True) + logger.warning( + f"[Reorganize] optimize_structure failed for scope={scope}, mem_cube_id={mem_cube_id}: {e}", + exc_info=True, + ) + + finally: + lock.release() def _process_memories_with_reorganize( self, diff --git a/src/memos/memories/textual/tree_text_memory/organize/manager.py b/src/memos/memories/textual/tree_text_memory/organize/manager.py index 54776134b..776f71765 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/manager.py +++ b/src/memos/memories/textual/tree_text_memory/organize/manager.py @@ -9,11 +9,8 @@ from memos.graph_dbs.neo4j import Neo4jGraphDB from memos.llms.factory import AzureLLM, OllamaLLM, OpenAILLM from memos.log import get_logger -from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata -from memos.memories.textual.tree_text_memory.organize.reorganizer import ( - GraphStructureReorganizer, - QueueMessage, -) +from memos.memories.textual.item import TextualMemoryItem +from memos.memories.textual.tree_text_memory.organize.reorganizer import GraphStructureReorganizer logger = get_logger(__name__) @@ -27,7 +24,6 @@ def __init__( llm: OpenAILLM | OllamaLLM | AzureLLM, memory_size: dict | None = None, threshold: float | None = 0.80, - merged_threshold: float | None = 0.92, is_reorganize: bool = False, ): self.graph_store = graph_store @@ -50,7 +46,6 @@ def __init__( self.reorganizer = GraphStructureReorganizer( graph_store, llm, embedder, is_reorganize=is_reorganize ) - self._merged_threshold = merged_threshold def add( self, memories: list[TextualMemoryItem], user_name: str | None = None, mode: str = "sync" @@ -194,92 +189,6 @@ def _add_to_graph_memory( memory.metadata.model_dump(exclude_none=True), user_name=user_name, ) - self.reorganizer.add_message( - QueueMessage( - op="add", - after_node=[node_id], - ) - ) - return node_id - - def _inherit_edges(self, from_id: str, to_id: str) -> None: - """ - Migrate all non-lineage edges from `from_id` to `to_id`, - and remove them from `from_id` after copying. - """ - edges = self.graph_store.get_edges(from_id, type="ANY", direction="ANY") - - for edge in edges: - if edge["type"] == "MERGED_TO": - continue # Keep lineage edges - - new_from = to_id if edge["from"] == from_id else edge["from"] - new_to = to_id if edge["to"] == from_id else edge["to"] - - if new_from == new_to: - continue - - # Add edge to merged node if it doesn't already exist - if not self.graph_store.edge_exists(new_from, new_to, edge["type"], direction="ANY"): - self.graph_store.add_edge(new_from, new_to, edge["type"]) - - # Remove original edge if it involved the archived node - self.graph_store.delete_edge(edge["from"], edge["to"], edge["type"]) - - def _ensure_structure_path( - self, memory_type: str, metadata: TreeNodeTextualMemoryMetadata - ) -> str: - """ - Ensure structural path exists (ROOT → ... → final node), return last node ID. - - Args: - path: like ["hobby", "photography"] - - Returns: - Final node ID of the structure path. 
- """ - # Step 1: Try to find an existing memory node with content == tag - existing = self.graph_store.get_by_metadata( - [ - {"field": "memory", "op": "=", "value": metadata.key}, - {"field": "memory_type", "op": "=", "value": memory_type}, - ] - ) - if existing: - node_id = existing[0] # Use the first match - else: - # Step 2: If not found, create a new structure node - new_node = TextualMemoryItem( - memory=metadata.key, - metadata=TreeNodeTextualMemoryMetadata( - user_id=metadata.user_id, - session_id=metadata.session_id, - memory_type=memory_type, - status="activated", - tags=[], - key=metadata.key, - embedding=self.embedder.embed([metadata.key])[0], - usage=[], - sources=[], - confidence=0.99, - background="", - ), - ) - self.graph_store.add_node( - id=new_node.id, - memory=new_node.memory, - metadata=new_node.metadata.model_dump(exclude_none=True), - ) - self.reorganizer.add_message( - QueueMessage( - op="add", - after_node=[new_node.id], - ) - ) - - node_id = new_node.id - - # Step 3: Return this structure node ID as the parent_id return node_id def remove_and_refresh_memory(self): @@ -306,17 +215,3 @@ def _cleanup_memories_if_needed(self) -> None: logger.debug(f"Cleaned up {memory_type}: {current_count} -> {limit}") except Exception: logger.warning(f"Remove {memory_type} error: {traceback.format_exc()}") - - def wait_reorganizer(self): - """ - Wait for the reorganizer to finish processing all messages. - """ - logger.debug("Waiting for reorganizer to finish processing messages...") - self.reorganizer.wait_until_current_task_done() - - def close(self): - self.wait_reorganizer() - self.reorganizer.stop() - - def __del__(self): - self.close() diff --git a/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py b/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py index 0337225d1..1a2568a3c 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py +++ b/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py @@ -1,24 +1,20 @@ import json -import threading import time import traceback from collections import defaultdict from concurrent.futures import as_completed -from queue import PriorityQueue -from typing import Literal import numpy as np from memos.context.context import ContextThreadPoolExecutor from memos.dependency import require_python_package from memos.embedders.factory import OllamaEmbedder -from memos.graph_dbs.item import GraphDBEdge, GraphDBNode +from memos.graph_dbs.item import GraphDBNode from memos.graph_dbs.neo4j import Neo4jGraphDB from memos.llms.base import BaseLLM from memos.log import get_logger from memos.memories.textual.item import SourceMessage, TreeNodeTextualMemoryMetadata -from memos.memories.textual.tree_text_memory.organize.handler import NodeHandler from memos.memories.textual.tree_text_memory.organize.relation_reason_detector import ( RelationAndReasoningDetector, ) @@ -44,30 +40,6 @@ def build_summary_parent_node(cluster_nodes): return normalized_sources -class QueueMessage: - def __init__( - self, - op: Literal["add", "remove", "merge", "update", "end"], - # `str` for node and edge IDs, `GraphDBNode` and `GraphDBEdge` for actual objects - before_node: list[str] | list[GraphDBNode] | None = None, - before_edge: list[str] | list[GraphDBEdge] | None = None, - after_node: list[str] | list[GraphDBNode] | None = None, - after_edge: list[str] | list[GraphDBEdge] | None = None, - ): - self.op = op - self.before_node = before_node - self.before_edge = before_edge - self.after_node = after_node - 
self.after_edge = after_edge - - def __str__(self) -> str: - return f"QueueMessage(op={self.op}, before_node={self.before_node if self.before_node is None else len(self.before_node)}, after_node={self.after_node if self.after_node is None else len(self.after_node)})" - - def __lt__(self, other: "QueueMessage") -> bool: - op_priority = {"add": 2, "remove": 2, "merge": 1, "end": 0} - return op_priority[self.op] < op_priority[other.op] - - def extract_first_to_last_brace(text: str): start = text.find("{") end = text.rfind("}") @@ -77,134 +49,91 @@ def extract_first_to_last_brace(text: str): return json_str, json.loads(json_str) +def recursive_clustering( + nodes_list, depth=0, max_cluster_size: int = 20, min_cluster_size: int = 10 +): + """Recursively split clusters until each is <= max_cluster_size.""" + from sklearn.cluster import MiniBatchKMeans + + indent = " " * depth + logger.info(f"{indent}[Recursive] Start clustering {len(nodes_list)} nodes at depth {depth}") + + if len(nodes_list) <= max_cluster_size: + logger.info(f"{indent}[Recursive] Node count <= {max_cluster_size}, stop splitting.") + return [nodes_list] + # Try kmeans with k = ceil(len(nodes) / max_cluster_size) + x_nodes = [n for n in nodes_list if n.metadata.embedding] + x = np.array([n.metadata.embedding for n in x_nodes]) + + if len(x) < min_cluster_size: + logger.info(f"{indent}[Recursive] Too few embeddings ({len(x)}), skipping clustering.") + return [nodes_list] + + k = min(len(x), (len(nodes_list) + max_cluster_size - 1) // max_cluster_size) + k = max(1, k) + + try: + logger.info(f"{indent}[Recursive] Clustering with k={k} on {len(x)} points.") + kmeans = MiniBatchKMeans(n_clusters=k, batch_size=256, random_state=42) + labels = kmeans.fit_predict(x) + + label_groups = defaultdict(list) + for node, label in zip(x_nodes, labels, strict=False): + label_groups[label].append(node) + + # Map: label -> nodes with no embedding (fallback group) + no_embedding_nodes = [n for n in nodes_list if not n.metadata.embedding] + if no_embedding_nodes: + logger.warning( + f"{indent}[Recursive] {len(no_embedding_nodes)} nodes have no embedding. Added to largest cluster." 
+ ) + # Assign to the largest cluster + largest_label = max(label_groups.items(), key=lambda kv: len(kv[1]))[0] + label_groups[largest_label].extend(no_embedding_nodes) + + result = [] + for label, sub_group in label_groups.items(): + logger.info(f"{indent} Cluster-{label}: {len(sub_group)} nodes") + result.extend( + recursive_clustering( + sub_group, + depth=depth + 1, + max_cluster_size=max_cluster_size, + min_cluster_size=min_cluster_size, + ) + ) + return result + + except Exception as e: + logger.warning(f"{indent}[Recursive] Clustering failed: {e}, fallback to one cluster.") + return [nodes_list] + + +def _parse_json_result(response_text): + try: + response_text = response_text.replace("```", "").replace("json", "") + response_json = extract_first_to_last_brace(response_text)[1] + return response_json + except json.JSONDecodeError as e: + logger.warning(f"Failed to parse LLM response as JSON: {e}\nRaw response:\n{response_text}") + return {} + + class GraphStructureReorganizer: def __init__( self, graph_store: Neo4jGraphDB, llm: BaseLLM, embedder: OllamaEmbedder, is_reorganize: bool ): - self.queue = PriorityQueue() # Min-heap self.graph_store = graph_store self.llm = llm self.embedder = embedder self.relation_detector = RelationAndReasoningDetector( self.graph_store, self.llm, self.embedder ) - self.resolver = NodeHandler(graph_store=graph_store, llm=llm, embedder=embedder) - self.is_reorganize = is_reorganize - self._reorganize_needed = True - if self.is_reorganize: - # ____ 1. For queue message driven thread ___________ - self.thread = threading.Thread(target=self._run_message_consumer_loop) - self.thread.start() - # ____ 2. For periodic structure optimization _______ - self._stop_scheduler = False - self._is_optimizing = {"LongTermMemory": False, "UserMemory": False} - self.structure_optimizer_thread = threading.Thread( - target=self._run_structure_organizer_loop - ) - self.structure_optimizer_thread.start() - - def add_message(self, message: QueueMessage): - self.queue.put_nowait(message) - - def wait_until_current_task_done(self): - """ - Wait until: - 1) queue is empty - 2) any running structure optimization is done - """ - deadline = time.time() + 600 - if not self.is_reorganize: - return - - if not self.queue.empty(): - self.queue.join() - logger.debug("Queue is now empty.") - - while any(self._is_optimizing.values()): - logger.debug(f"Waiting for structure optimizer to finish... {self._is_optimizing}") - if time.time() > deadline: - logger.error(f"Wait timed out; flags={self._is_optimizing}") - break - time.sleep(1) - logger.debug("Structure optimizer is now idle.") - - def _run_message_consumer_loop(self): - while True: - message = self.queue.get() - if message.op == "end": - break - - try: - if self._preprocess_message(message): - self.handle_message(message) - except Exception: - logger.error(traceback.format_exc()) - self.queue.task_done() - - @require_python_package( - import_name="schedule", - install_command="pip install schedule", - install_link="https://schedule.readthedocs.io/en/stable/installation.html", - ) - def _run_structure_organizer_loop(self): - """ - Use schedule library to periodically trigger structure optimization. - This runs until the stop flag is set. 
- """ - import schedule - - schedule.every(100).seconds.do(self.optimize_structure, scope="LongTermMemory") - schedule.every(100).seconds.do(self.optimize_structure, scope="UserMemory") - - logger.info("Structure optimizer schedule started.") - while not getattr(self, "_stop_scheduler", False): - if any(self._is_optimizing.values()): - time.sleep(1) - continue - if self._reorganize_needed: - logger.info("[Reorganizer] Triggering optimize_structure due to new nodes.") - self.optimize_structure(scope="LongTermMemory") - self.optimize_structure(scope="UserMemory") - self._reorganize_needed = False - time.sleep(30) - - def stop(self): - """ - Stop the reorganizer thread. - """ - if not self.is_reorganize: - return - - self.add_message(QueueMessage(op="end")) - self.thread.join() - logger.info("Reorganize thread stopped.") - self._stop_scheduler = True - self.structure_optimizer_thread.join() - logger.info("Structure optimizer stopped.") - - def handle_message(self, message: QueueMessage): - handle_map = {"add": self.handle_add, "remove": self.handle_remove} - handle_map[message.op](message) - logger.debug(f"message queue size: {self.queue.qsize()}") - - def handle_add(self, message: QueueMessage): - logger.debug(f"Handling add operation: {str(message)[:500]}") - added_node = message.after_node[0] - detected_relationships = self.resolver.detect( - added_node, scope=added_node.metadata.memory_type - ) - if detected_relationships: - for added_node, existing_node, relation in detected_relationships: - self.resolver.resolve(added_node, existing_node, relation) - - self._reorganize_needed = True - - def handle_remove(self, message: QueueMessage): - logger.debug(f"Handling remove operation: {str(message)[:50]}") def optimize_structure( self, + user_name: str, scope: str = "LongTermMemory", local_tree_threshold: int = 10, min_cluster_size: int = 4, @@ -229,28 +158,24 @@ def _check_deadline(where: str): return True return False - if self._is_optimizing[scope]: - logger.info(f"[GraphStructureReorganize] Already optimizing for {scope}. Skipping.") - return - - if self.graph_store.node_not_exist(scope): + if self.graph_store.node_not_exist(scope, user_name=user_name): logger.debug(f"[GraphStructureReorganize] No nodes for scope={scope}. 
Skip.") return - self._is_optimizing[scope] = True try: logger.debug( f"[GraphStructureReorganize] 🔍 Starting structure optimization for scope: {scope}" ) - logger.debug( f"[GraphStructureReorganize] Num of scope in self.graph_store is" - f" {self.graph_store.get_memory_count(scope)}" + f" {self.graph_store.get_memory_count(scope, user_name=user_name)}" ) # Load candidate nodes if _check_deadline("[GraphStructureReorganize] Before loading candidates"): return - raw_nodes = self.graph_store.get_structure_optimization_candidates(scope) + raw_nodes = self.graph_store.get_structure_optimization_candidates( + scope, user_name=user_name + ) nodes = [GraphDBNode(**n) for n in raw_nodes] if not nodes: @@ -282,6 +207,7 @@ def _check_deadline(where: str): scope, local_tree_threshold, min_cluster_size, + user_name, ) ) @@ -299,7 +225,6 @@ def _check_deadline(where: str): logger.info("[GraphStructure Reorganize] Structure optimization finished.") finally: - self._is_optimizing[scope] = False logger.info("[GraphStructureReorganize] Structure optimization finished.") def _process_cluster_and_write( @@ -308,6 +233,7 @@ def _process_cluster_and_write( scope: str, local_tree_threshold: int, min_cluster_size: int, + user_name: str, ): if len(cluster_nodes) <= min_cluster_size: return @@ -320,15 +246,17 @@ def _process_cluster_and_write( if len(sub_nodes) < min_cluster_size: continue # Skip tiny noise sub_parent_node = self._summarize_cluster(sub_nodes, scope) - self._create_parent_node(sub_parent_node) - self._link_cluster_nodes(sub_parent_node, sub_nodes) + self._create_parent_node(sub_parent_node, user_name) + self._link_cluster_nodes(sub_parent_node, sub_nodes, user_name) sub_parents.append(sub_parent_node) if sub_parents and len(sub_parents) >= min_cluster_size: cluster_parent_node = self._summarize_cluster(cluster_nodes, scope) - self._create_parent_node(cluster_parent_node) + self._create_parent_node(cluster_parent_node, user_name) for sub_parent in sub_parents: - self.graph_store.add_edge(cluster_parent_node.id, sub_parent.id, "PARENT") + self.graph_store.add_edge( + cluster_parent_node.id, sub_parent.id, "PARENT", user_name=user_name + ) logger.info("Adding relations/reasons") nodes_to_check = cluster_nodes @@ -352,10 +280,16 @@ def _process_cluster_and_write( # 1) Add pairwise relations for rel in results["relations"]: if not self.graph_store.edge_exists( - rel["source_id"], rel["target_id"], rel["relation_type"] + rel["source_id"], + rel["target_id"], + rel["relation_type"], + user_name=user_name, ): self.graph_store.add_edge( - rel["source_id"], rel["target_id"], rel["relation_type"] + rel["source_id"], + rel["target_id"], + rel["relation_type"], + user_name=user_name, ) # 2) Add inferred nodes and link to sources @@ -364,14 +298,21 @@ def _process_cluster_and_write( inf_node.id, inf_node.memory, inf_node.metadata.model_dump(exclude_none=True), + user_name=user_name, ) for src_id in inf_node.metadata.sources: - self.graph_store.add_edge(src_id, inf_node.id, "INFERS") + self.graph_store.add_edge( + src_id, inf_node.id, "INFERS", user_name=user_name + ) # 3) Add sequence links for seq in results["sequence_links"]: - if not self.graph_store.edge_exists(seq["from_id"], seq["to_id"], "FOLLOWS"): - self.graph_store.add_edge(seq["from_id"], seq["to_id"], "FOLLOWS") + if not self.graph_store.edge_exists( + seq["from_id"], seq["to_id"], "FOLLOWS", user_name=user_name + ): + self.graph_store.add_edge( + seq["from_id"], seq["to_id"], "FOLLOWS", user_name=user_name + ) # 4) Add aggregate concept nodes for 
agg_node in results["aggregate_nodes"]: @@ -379,9 +320,12 @@ def _process_cluster_and_write( agg_node.id, agg_node.memory, agg_node.metadata.model_dump(exclude_none=True), + user_name=user_name, ) for child_id in agg_node.metadata.sources: - self.graph_store.add_edge(agg_node.id, child_id, "AGGREGATE_TO") + self.graph_store.add_edge( + agg_node.id, child_id, "AGGREGATE_TO", user_name=user_name + ) logger.info("[Reorganizer] Cluster relation/reasoning done.") @@ -407,7 +351,7 @@ def _local_subcluster( messages = [{"role": "user", "content": prompt}] response_text = self.llm.generate(messages) - response_json = self._parse_json_result(response_text) + response_json = _parse_json_result(response_text) assigned_ids = set() result_subclusters = [] @@ -442,7 +386,6 @@ def _partition(self, nodes, min_cluster_size: int = 10, max_cluster_size: int = Returns: List of clusters, each as a list of GraphDBNode """ - from sklearn.cluster import MiniBatchKMeans if len(nodes) <= max_cluster_size: logger.info( @@ -450,63 +393,9 @@ def _partition(self, nodes, min_cluster_size: int = 10, max_cluster_size: int = ) return [nodes] - def recursive_clustering(nodes_list, depth=0): - """Recursively split clusters until each is <= max_cluster_size.""" - indent = " " * depth - logger.info( - f"{indent}[Recursive] Start clustering {len(nodes_list)} nodes at depth {depth}" - ) - - if len(nodes_list) <= max_cluster_size: - logger.info( - f"{indent}[Recursive] Node count <= {max_cluster_size}, stop splitting." - ) - return [nodes_list] - # Try kmeans with k = ceil(len(nodes) / max_cluster_size) - x_nodes = [n for n in nodes_list if n.metadata.embedding] - x = np.array([n.metadata.embedding for n in x_nodes]) - - if len(x) < min_cluster_size: - logger.info( - f"{indent}[Recursive] Too few embeddings ({len(x)}), skipping clustering." - ) - return [nodes_list] - - k = min(len(x), (len(nodes_list) + max_cluster_size - 1) // max_cluster_size) - k = max(1, k) - - try: - logger.info(f"{indent}[Recursive] Clustering with k={k} on {len(x)} points.") - kmeans = MiniBatchKMeans(n_clusters=k, batch_size=256, random_state=42) - labels = kmeans.fit_predict(x) - - label_groups = defaultdict(list) - for node, label in zip(x_nodes, labels, strict=False): - label_groups[label].append(node) - - # Map: label -> nodes with no embedding (fallback group) - no_embedding_nodes = [n for n in nodes_list if not n.metadata.embedding] - if no_embedding_nodes: - logger.warning( - f"{indent}[Recursive] {len(no_embedding_nodes)} nodes have no embedding. Added to largest cluster." - ) - # Assign to largest cluster - largest_label = max(label_groups.items(), key=lambda kv: len(kv[1]))[0] - label_groups[largest_label].extend(no_embedding_nodes) - - result = [] - for label, sub_group in label_groups.items(): - logger.info(f"{indent} Cluster-{label}: {len(sub_group)} nodes") - result.extend(recursive_clustering(sub_group, depth=depth + 1)) - return result - - except Exception as e: - logger.warning( - f"{indent}[Recursive] Clustering failed: {e}, fallback to one cluster." 
- ) - return [nodes_list] - - raw_clusters = recursive_clustering(nodes) + raw_clusters = recursive_clustering( + nodes, max_cluster_size=max_cluster_size, min_cluster_size=min_cluster_size + ) filtered_clusters = [c for c in raw_clusters if len(c) > min_cluster_size] logger.info(f"[KMeansPartition] Total clusters before filtering: {len(raw_clusters)}") @@ -538,7 +427,7 @@ def _summarize_cluster(self, cluster_nodes: list[GraphDBNode], scope: str) -> Gr messages = [{"role": "user", "content": prompt}] response_text = self.llm.generate(messages) - response_json = self._parse_json_result(response_text) + response_json = _parse_json_result(response_text) # Extract fields parent_key = response_json.get("key", "").strip() @@ -567,18 +456,7 @@ def _summarize_cluster(self, cluster_nodes: list[GraphDBNode], scope: str) -> Gr ) return parent_node - def _parse_json_result(self, response_text): - try: - response_text = response_text.replace("```", "").replace("json", "") - response_json = extract_first_to_last_brace(response_text)[1] - return response_json - except json.JSONDecodeError as e: - logger.warning( - f"Failed to parse LLM response as JSON: {e}\nRaw response:\n{response_text}" - ) - return {} - - def _create_parent_node(self, parent_node: GraphDBNode) -> None: + def _create_parent_node(self, parent_node: GraphDBNode, user_name: str) -> None: """ Create a new parent node for the cluster. """ @@ -586,38 +464,17 @@ def _create_parent_node(self, parent_node: GraphDBNode) -> None: parent_node.id, parent_node.memory, parent_node.metadata.model_dump(exclude_none=True), + user_name=user_name, ) - def _link_cluster_nodes(self, parent_node: GraphDBNode, child_nodes: list[GraphDBNode]): + def _link_cluster_nodes( + self, parent_node: GraphDBNode, child_nodes: list[GraphDBNode], user_name: str + ): """ Add PARENT edges from the parent node to all nodes in the cluster. """ for child in child_nodes: if not self.graph_store.edge_exists( - parent_node.id, child.id, "PARENT", direction="OUTGOING" + parent_node.id, child.id, "PARENT", direction="OUTGOING", user_name=user_name ): - self.graph_store.add_edge(parent_node.id, child.id, "PARENT") - - def _preprocess_message(self, message: QueueMessage) -> bool: - message = self._convert_id_to_node(message) - if message.after_node is None or None in message.after_node: - logger.debug( - f"Found non-existent node in after_node in message: {message}, skip this message." - ) - return False - return True - - def _convert_id_to_node(self, message: QueueMessage) -> QueueMessage: - """ - Convert IDs in the message.after_node to GraphDBNode objects. 
- """ - for i, node in enumerate(message.after_node or []): - if not isinstance(node, str): - continue - raw_node = self.graph_store.get_node(node, include_embedding=True) - if raw_node is None: - logger.debug(f"Node with ID {node} not found in the graph store.") - message.after_node[i] = None - else: - message.after_node[i] = GraphDBNode(**raw_node) - return message + self.graph_store.add_edge(parent_node.id, child.id, "PARENT", user_name=user_name) diff --git a/tests/memories/textual/test_tree_manager.py b/tests/memories/textual/test_tree_manager.py index 1ad730ee5..e3ec89243 100644 --- a/tests/memories/textual/test_tree_manager.py +++ b/tests/memories/textual/test_tree_manager.py @@ -102,37 +102,6 @@ def test_add_to_graph_memory_creates_new_node(memory_manager, mock_graph_store): assert mock_graph_store.add_node.called -def test_inherit_edges(memory_manager, mock_graph_store): - from_id = "from_id" - to_id = "to_id" - mock_graph_store.get_edges.return_value = [ - {"from": from_id, "to": "node_b", "type": "RELATE"}, - {"from": "node_c", "to": from_id, "type": "RELATE"}, - ] - memory_manager._inherit_edges(from_id, to_id) - assert mock_graph_store.add_edge.call_count > 0 - - -def test_ensure_structure_path_creates_new(memory_manager, mock_graph_store): - mock_graph_store.get_by_metadata.return_value = [] - meta = TreeNodeTextualMemoryMetadata( - key="hobby", - embedding=[0.1] * 5, - user_id="user123", - session_id="sess", - ) - node_id = memory_manager._ensure_structure_path("UserMemory", meta) - assert isinstance(node_id, str) - assert mock_graph_store.add_node.called - - -def test_ensure_structure_path_reuses_existing(memory_manager, mock_graph_store): - mock_graph_store.get_by_metadata.return_value = ["existing_node_id"] - meta = TreeNodeTextualMemoryMetadata(key="hobby") - node_id = memory_manager._ensure_structure_path("UserMemory", meta) - assert node_id == "existing_node_id" - - def test_add_returns_written_node_ids(memory_manager): memory = TextualMemoryItem( memory="test memory", From e0f089e654c6b3f965670a0dee8e8ff9c065405d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Wed, 22 Oct 2025 21:11:05 +0800 Subject: [PATCH 25/32] feat: modify tree_textual_memory example --- examples/core_memories/tree_textual_memory.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/examples/core_memories/tree_textual_memory.py b/examples/core_memories/tree_textual_memory.py index d2e197e5b..0db3af196 100644 --- a/examples/core_memories/tree_textual_memory.py +++ b/examples/core_memories/tree_textual_memory.py @@ -172,7 +172,8 @@ added_ids = my_tree_textual_memory.add(m_list) for i, id in enumerate(added_ids): print(f"{i}'th added result is:" + my_tree_textual_memory.get(id).memory) - my_tree_textual_memory.memory_manager.wait_reorganizer() + # wait the synchronous thread + # TODO: USE SCHEDULE MODULE TO WAIT time.sleep(60) @@ -233,7 +234,8 @@ for m_list in doc_memory: added_ids = my_tree_textual_memory.add(m_list) - my_tree_textual_memory.memory_manager.wait_reorganizer() + # wait the synchronous thread + # TODO: USE SCHEDULE MODULE TO WAIT results = my_tree_textual_memory.search( "Tell me about what memos consist of?", @@ -245,9 +247,10 @@ print(f"{i}'th similar result is: " + str(r["memory"])) print(f"Successfully search {len(results)} memories") -# close the synchronous thread in memory manager -my_tree_textual_memory.memory_manager.close() +# close the synchronous thread +# TODO: USE SCHEDULE MODULE TO CLOSE # 
my_tree_textual_memory.dump +# Note that you cannot drop this tree when`use_multi_db` == +# false. my_tree_textual_memory.drop() """ my_tree_textual_memory.dump("tmp/my_tree_textual_memory") -my_tree_textual_memory.drop() From 2ac82019a471c6ec329b6eab9af412cc052ce51a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Wed, 22 Oct 2025 21:54:57 +0800 Subject: [PATCH 26/32] feat: modify reorganizer and add passing user_name in relation_reason_detector --- ...textual_memory_relation_reason_detector.py | 5 +++- src/memos/graph_dbs/base.py | 2 +- .../organize/relation_reason_detector.py | 25 +++++++++++++------ .../tree_text_memory/organize/reorganizer.py | 17 ++++++++++--- 4 files changed, 36 insertions(+), 13 deletions(-) diff --git a/examples/basic_modules/tree_textual_memory_relation_reason_detector.py b/examples/basic_modules/tree_textual_memory_relation_reason_detector.py index 72e4deb60..294f8973a 100644 --- a/examples/basic_modules/tree_textual_memory_relation_reason_detector.py +++ b/examples/basic_modules/tree_textual_memory_relation_reason_detector.py @@ -27,6 +27,8 @@ ) embedder = EmbedderFactory.from_config(embedder_config) +user_name = "lucy4" + # === Step 2: Initialize Neo4j GraphStore === graph_config = GraphDBConfigFactory( backend="neo4j", @@ -34,7 +36,7 @@ "uri": "bolt://localhost:7687", "user": "neo4j", "password": "12345678", - "db_name": "lucy4", + "db_name": user_name, "auto_create": True, }, ) @@ -178,6 +180,7 @@ results = relation_detector.process_node( node=node, + user_name=user_name, exclude_ids=[node.id], # Exclude self when searching for neighbors top_k=5, ) diff --git a/src/memos/graph_dbs/base.py b/src/memos/graph_dbs/base.py index b26db5afa..ba1611cbf 100644 --- a/src/memos/graph_dbs/base.py +++ b/src/memos/graph_dbs/base.py @@ -70,7 +70,7 @@ def edge_exists(self, source_id: str, target_id: str, type: str) -> bool: # Graph Query & Reasoning @abstractmethod - def get_node(self, id: str, include_embedding: bool = False) -> dict[str, Any] | None: + def get_node(self, id: str, include_embedding: bool = False, **kwargs) -> dict[str, Any] | None: """ Retrieve the metadata and content of a node. 
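+
+        Backends may accept additional keyword filters via **kwargs, e.g.
+        (illustrative call): graph_store.get_node(node_id, user_name="alice")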
Args: diff --git a/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py b/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py index ad9dcb2b8..71d88b955 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +++ b/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py @@ -1,9 +1,9 @@ import json import traceback -from memos.embedders.factory import OllamaEmbedder +from memos.embedders.base import BaseEmbedder +from memos.graph_dbs.base import BaseGraphDB from memos.graph_dbs.item import GraphDBNode -from memos.graph_dbs.neo4j import Neo4jGraphDB from memos.llms.base import BaseLLM from memos.log import get_logger from memos.memories.textual.item import TreeNodeTextualMemoryMetadata @@ -18,12 +18,14 @@ class RelationAndReasoningDetector: - def __init__(self, graph_store: Neo4jGraphDB, llm: BaseLLM, embedder: OllamaEmbedder): + def __init__(self, graph_store: BaseGraphDB, llm: BaseLLM, embedder: BaseEmbedder): self.graph_store = graph_store self.llm = llm self.embedder = embedder - def process_node(self, node: GraphDBNode, exclude_ids: list[str], top_k: int = 5): + def process_node( + self, node: GraphDBNode, user_name: str, exclude_ids: list[str], top_k: int = 5 + ): """ Unified pipeline for: 1) Pairwise relations (cause, condition, conflict, relate) @@ -52,6 +54,7 @@ def process_node(self, node: GraphDBNode, exclude_ids: list[str], top_k: int = 5 exclude_ids=exclude_ids, top_k=top_k, min_overlap=2, + user_name=user_name, ) nearest = [GraphDBNode(**cand_data) for cand_data in nearest] @@ -62,7 +65,7 @@ def process_node(self, node: GraphDBNode, exclude_ids: list[str], top_k: int = 5 """ # 2) Inferred nodes (from causal/condition) - inferred = self._infer_fact_nodes_from_relations(pairwise) + inferred = self._infer_fact_nodes_from_relations(pairwise, user_name=user_name) results["inferred_nodes"].extend(inferred) """ @@ -115,12 +118,18 @@ def _detect_pairwise_causal_condition_relations( return results - def _infer_fact_nodes_from_relations(self, pairwise_results: dict): + def _infer_fact_nodes_from_relations(self, pairwise_results: dict, user_name: str): inferred_nodes = [] for rel in pairwise_results["relations"]: if rel["relation_type"] in ("CAUSE", "CONDITION"): - src = self.graph_store.get_node(rel["source_id"]) - tgt = self.graph_store.get_node(rel["target_id"]) + src = self.graph_store.get_node( + rel["source_id"], + user_name=user_name, + ) + tgt = self.graph_store.get_node( + rel["target_id"], + user_name=user_name, + ) if not src or not tgt: continue diff --git a/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py b/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py index 1a2568a3c..944daf315 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py +++ b/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py @@ -174,7 +174,7 @@ def _check_deadline(where: str): if _check_deadline("[GraphStructureReorganize] Before loading candidates"): return raw_nodes = self.graph_store.get_structure_optimization_candidates( - scope, user_name=user_name + scope, user_name=user_name, include_embedding=True ) nodes = [GraphDBNode(**n) for n in raw_nodes] @@ -208,6 +208,7 @@ def _check_deadline(where: str): local_tree_threshold, min_cluster_size, user_name, + _check_deadline, ) ) @@ -234,6 +235,7 @@ def _process_cluster_and_write( local_tree_threshold: int, min_cluster_size: int, user_name: str, + 
check_deadline_func, ): if len(cluster_nodes) <= min_cluster_size: return @@ -271,11 +273,20 @@ def _process_cluster_and_write( node, exclude_ids, 10, # top_k + user_name=user_name, ) ) - for f in as_completed(futures, timeout=300): - results = f.result() + for f in as_completed(futures): + if check_deadline_func("[GraphStructureReorganize] Relations/reasons"): + for x in futures: + x.cancel() + return + try: + results = f.result() + except Exception as e: + logger.warning(f"Relation task failed: {e}", exc_info=True) + continue # 1) Add pairwise relations for rel in results["relations"]: From 11d4f00342f0644c13f9a648c2432799bfa2a0be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 28 Oct 2025 15:07:18 +0800 Subject: [PATCH 27/32] feat: delete reorganize task switch button in core --- src/memos/mem_os/core.py | 19 ------------------- src/memos/mem_os/product.py | 3 ++- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index 11095accd..bb9523d7f 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -165,25 +165,6 @@ def mem_scheduler_off(self) -> bool: logger.error(f"Failed to stop scheduler: {e!s}") return False - def mem_reorganizer_on(self) -> bool: - pass - - def mem_reorganizer_off(self) -> bool: - """temporally implement""" - for mem_cube in self.mem_cubes.values(): - logger.info(f"try to close reorganizer for {mem_cube.text_mem.config.cube_id}") - if mem_cube.text_mem and mem_cube.text_mem.is_reorganize: - logger.info(f"close reorganizer for {mem_cube.text_mem.config.cube_id}") - mem_cube.text_mem.memory_manager.close() - mem_cube.text_mem.memory_manager.wait_reorganizer() - - def mem_reorganizer_wait(self) -> bool: - for mem_cube in self.mem_cubes.values(): - logger.info(f"try to close reorganizer for {mem_cube.text_mem.config.cube_id}") - if mem_cube.text_mem and mem_cube.text_mem.is_reorganize: - logger.info(f"close reorganizer for {mem_cube.text_mem.config.cube_id}") - mem_cube.text_mem.memory_manager.wait_reorganizer() - def _register_chat_history( self, user_id: str | None = None, session_id: str | None = None ) -> None: diff --git a/src/memos/mem_os/product.py b/src/memos/mem_os/product.py index 7e0ed9aef..9350fbfec 100644 --- a/src/memos/mem_os/product.py +++ b/src/memos/mem_os/product.py @@ -3,6 +3,7 @@ import os import random import time +import traceback from collections.abc import Generator from datetime import datetime @@ -215,7 +216,7 @@ def _restore_user_instances( logger.error(f"Failed to restore user configuration for {user_id}: {e}") except Exception as e: - logger.error(f"Error during user instance restoration: {e}") + logger.error(f"Error during user instance restoration: {e}: {traceback.print_exc()}") def _initialize_cube_from_default_config( self, cube_id: str, user_id: str, default_config: GeneralMemCubeConfig From 87bc80e0a30d9ac4419030dd6854edb11b6de3ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 28 Oct 2025 15:10:11 +0800 Subject: [PATCH 28/32] feat: fix get candidate nodes; add get neighbors; [TODO]: neo4j and polardb --- src/memos/graph_dbs/nebular.py | 129 +++++++++++++++++++++++++++------ 1 file changed, 108 insertions(+), 21 deletions(-) diff --git a/src/memos/graph_dbs/nebular.py b/src/memos/graph_dbs/nebular.py index 12b493e58..dd810c9bc 100644 --- a/src/memos/graph_dbs/nebular.py +++ b/src/memos/graph_dbs/nebular.py @@ -1174,7 +1174,7 @@ def get_grouped_counts( group_by_fields.append(alias) # Full 
GQL query construction gql = f""" - MATCH (n /*+ INDEX(idx_memory_user_name) */) + MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */) {where_clause} RETURN {", ".join(return_fields)}, COUNT(n) AS count """ @@ -1381,31 +1381,55 @@ def get_structure_optimization_candidates( where_clause += f' AND n.user_name = "{user_name}"' return_fields = self._build_return_fields(include_embedding) - return_fields += f", n.{self.dim_field} AS {self.dim_field}" - query = f""" + gql = f""" MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */) WHERE {where_clause} - OPTIONAL MATCH (n)-[@PARENT]->(c@Memory) - OPTIONAL MATCH (p@Memory)-[@PARENT]->(n) - WHERE c IS NULL AND p IS NULL - RETURN {return_fields} + OPTIONAL MATCH (n)-[@PARENT]->(c@Memory {{user_name: "{user_name}"}}) + OPTIONAL MATCH (p@Memory {{user_name: "{user_name}"}})-[@PARENT]->(n) + RETURN {return_fields}, + c.id AS child_id, + p.id AS parent_id """ - candidates = [] - node_ids = set() + per_node_seen_has_child_or_parent: dict[str, bool] = {} + per_node_payload: dict[str, dict] = {} + try: - results = self.execute_query(query) - for row in results: - props = {k: v.value for k, v in row.items()} - node = self._parse_node(props) - node_id = node["id"] - if node_id not in node_ids: - candidates.append(node) - node_ids.add(node_id) + results = self.execute_query(gql) except Exception as e: - logger.error(f"Failed : {e}, traceback: {traceback.format_exc()}") - return candidates + logger.error( + f"[get_structure_optimization_candidates] Query failed: {e}, " + f"traceback: {traceback.format_exc()}" + ) + return [] + + for row in results: + props = {k: v.value for k, v in row.items() if k not in ("child_id", "parent_id")} + node = self._parse_node(props) + nid = node["id"] + + if nid not in per_node_payload: + per_node_payload[nid] = node + per_node_seen_has_child_or_parent[nid] = False + + child_val = row.get("child_id") + parent_val = row.get("parent_id") + + child_unwrapped = self._parse_value(child_val) if (child_val is not None) else None + parent_unwrapped = self._parse_value(parent_val) if (parent_val is not None) else None + + if child_unwrapped: + per_node_seen_has_child_or_parent[nid] = True + if parent_unwrapped: + per_node_seen_has_child_or_parent[nid] = True + + isolated_nodes: list[dict] = [] + for nid, node_obj in per_node_payload.items(): + if not per_node_seen_has_child_or_parent[nid]: + isolated_nodes.append(node_obj) + + return isolated_nodes @timed def drop_database(self) -> None: @@ -1450,7 +1474,7 @@ def get_context_chain(self, id: str, type: str = "FOLLOWS") -> list[str]: @timed def get_neighbors( - self, id: str, type: str, direction: Literal["in", "out", "both"] = "out" + self, id: str, type: str, direction: Literal["in", "out", "both"] = "both" ) -> list[str]: """ Get connected node IDs in a specific direction and relationship type. @@ -1461,7 +1485,70 @@ def get_neighbors( Returns: List of neighboring node IDs. """ - raise NotImplementedError + if direction not in ("in", "out", "both"): + raise ValueError(f"Unsupported direction: {direction}") + + user_name = self.config.user_name + id_val = self._format_value(id) # e.g. '"5225-uuid..."' + user_val = self._format_value(user_name) # e.g. 
'"lme_user_1"' + edge_type = type # assume caller passes valid edge tag + + def _run_out_query() -> list[str]: + # out: (this)-[edge_type]->(dst) + gql = f""" + MATCH (src@Memory {{id: {id_val}, user_name: {user_val}}}) + -[r@{edge_type}]-> + (dst@Memory {{user_name: {user_val}}}) + RETURN DISTINCT dst.id AS neighbor + """.strip() + try: + result = self.execute_query(gql) + except Exception as e: + logger.error(f"[get_neighbors][out] Query failed: {e}, gql={gql}") + return [] + + out_ids = [] + try: + for row in result: + out_ids.append(row["neighbor"].value) + except Exception as e: + logger.error(f"[get_neighbors][out] Parse failed: {e}") + return out_ids + + def _run_in_query() -> list[str]: + # in: (src)-[edge_type]->(this) + gql = f""" + MATCH (src@Memory {{user_name: {user_val}}}) + -[r@{edge_type}]-> + (dst@Memory {{id: {id_val}, user_name: {user_val}}}) + RETURN DISTINCT src.id AS neighbor + """.strip() + try: + result = self.execute_query(gql) + except Exception as e: + logger.error(f"[get_neighbors][in] Query failed: {e}, gql={gql}") + return [] + + in_ids = [] + try: + for row in result: + in_ids.append(row["neighbor"].value) + except Exception as e: + logger.error(f"[get_neighbors][in] Parse failed: {e}") + return in_ids + + if direction == "out": + return list(set(_run_out_query())) + elif direction == "in": + return list(set(_run_in_query())) + else: # direction == "both" + out_ids = _run_out_query() + in_ids = _run_in_query() + merged = set(out_ids) + merged.update(in_ids) + if id in merged: + merged.remove(id) + return list(merged) @timed def get_path(self, source_id: str, target_id: str, max_depth: int = 3) -> list[str]: From afa5bcf490b8694e15a20dcfdb71d6cc9740a0ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 28 Oct 2025 15:29:17 +0800 Subject: [PATCH 29/32] feat: update reorganize --- .../organize/relation_reason_detector.py | 6 +- .../tree_text_memory/organize/reorganizer.py | 120 ++++++++-- .../templates/tree_reorganize_prompts.py | 224 ++++++++++++++---- 3 files changed, 281 insertions(+), 69 deletions(-) diff --git a/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py b/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py index 71d88b955..2d8b72ecc 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +++ b/src/memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py @@ -24,7 +24,11 @@ def __init__(self, graph_store: BaseGraphDB, llm: BaseLLM, embedder: BaseEmbedde self.embedder = embedder def process_node( - self, node: GraphDBNode, user_name: str, exclude_ids: list[str], top_k: int = 5 + self, + node: GraphDBNode, + exclude_ids: list[str], + top_k: int = 5, + user_name: str | None = None, ): """ Unified pipeline for: diff --git a/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py b/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py index 944daf315..596f64f70 100644 --- a/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py +++ b/src/memos/memories/textual/tree_text_memory/organize/reorganizer.py @@ -1,5 +1,4 @@ import json -import time import traceback from collections import defaultdict @@ -133,8 +132,8 @@ def __init__( def optimize_structure( self, - user_name: str, scope: str = "LongTermMemory", + user_name: str | None = None, local_tree_threshold: int = 10, min_cluster_size: int = 4, min_group_size: int = 20, @@ -147,6 +146,8 @@ def optimize_structure( 3. 
Create parent nodes and build local PARENT trees. """ # --- Total time watchdog: deadline-check helpers --- + import time + start_ts = time.time() def _check_deadline(where: str): @@ -176,7 +177,25 @@ def _check_deadline(where: str): raw_nodes = self.graph_store.get_structure_optimization_candidates( scope, user_name=user_name, include_embedding=True ) - nodes = [GraphDBNode(**n) for n in raw_nodes] + logger.debug( + f"[GraphStructureReorganize] Found {len(raw_nodes)} candidate nodes to optimize: " + f"{[node['id'] for node in raw_nodes]}" + ) + + def _norm(s): + return s.strip().lower() if isinstance(s, str) else s + + filtered_raw = [] + for n in raw_nodes: + tags = (n.get("metadata") or {}).get("tags") or [] + if not any(_norm(t) == "mode:fast" for t in tags if isinstance(t, str)): + filtered_raw.append(n) + dropped = len(raw_nodes) - len(filtered_raw) + if dropped: + logger.info( + f"[GraphStructureReorganize] Tag filter dropped {dropped} nodes (mode:fast)." + ) + nodes = [GraphDBNode(**n) for n in filtered_raw] if not nodes: logger.info("[GraphStructureReorganize] No nodes to optimize. Skipping.") @@ -191,10 +210,9 @@ def _check_deadline(where: str): if _check_deadline("[GraphStructureReorganize] Before partition"): return partitioned_groups = self._partition(nodes) - logger.info( + logger.debug( f"[GraphStructureReorganize] Partitioned into {len(partitioned_groups)} clusters." ) - if _check_deadline("[GraphStructureReorganize] Before submit partition task"): return with ContextThreadPoolExecutor(max_workers=4) as executor: @@ -244,16 +262,41 @@ def _process_cluster_and_write( sub_clusters = self._local_subcluster(cluster_nodes) sub_parents = [] - for sub_nodes in sub_clusters: - if len(sub_nodes) < min_cluster_size: - continue # Skip tiny noise - sub_parent_node = self._summarize_cluster(sub_nodes, scope) - self._create_parent_node(sub_parent_node, user_name) - self._link_cluster_nodes(sub_parent_node, sub_nodes, user_name) - sub_parents.append(sub_parent_node) - + def _process_one_subcluster(sub_nodes): + try: + sub_parent_node = self._summarize_cluster(sub_nodes, scope) + self._create_parent_node(sub_parent_node, user_name) + self._link_cluster_nodes(sub_parent_node, sub_nodes, user_name) + sub_nodes_str = "\n|_____".join([sub_node.memory for sub_node in sub_nodes]) + logger.debug( + f"Processed one sub-cluster.
\nStructure:" f"\n  Parent node: {sub_parent_node.memory}\n" f"\n  Child nodes: {sub_nodes_str}" ) + return sub_parent_node + except Exception as e: + logger.warning(f"Processing sub-cluster failed: {e}", exc_info=True) + return None + + valid_sub_clusters = [sc for sc in sub_clusters if len(sc) >= min_cluster_size] + + max_workers = min(4, len(valid_sub_clusters)) + if max_workers > 0: + with ContextThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit(_process_one_subcluster, sc) for sc in valid_sub_clusters + ] + for fut in as_completed(futures): + res = fut.result() + if res is not None: + sub_parents.append(res) + + logger.debug(f"Number of sub-parents: {len(sub_parents)}") if sub_parents and len(sub_parents) >= min_cluster_size: cluster_parent_node = self._summarize_cluster(cluster_nodes, scope) + logger.debug( + f"Created cluster parent node {cluster_parent_node.id}: {cluster_parent_node.memory}" + ) self._create_parent_node(cluster_parent_node, user_name) for sub_parent in sub_parents: self.graph_store.add_edge( @@ -363,6 +406,7 @@ def _local_subcluster( messages = [{"role": "user", "content": prompt}] response_text = self.llm.generate(messages) response_json = _parse_json_result(response_text) + logger.debug(f"Sub-cluster LLM call:\ninput: {prompt}\noutput: {response_json}") assigned_ids = set() result_subclusters = [] @@ -411,12 +455,26 @@ def _partition(self, nodes, min_cluster_size: int = 10, max_cluster_size: int = logger.info(f"[KMeansPartition] Total clusters before filtering: {len(raw_clusters)}") for i, cluster in enumerate(raw_clusters): - logger.info(f"[KMeansPartition] Cluster-{i}: {len(cluster)} nodes") - - logger.info( + logger.debug(f"[KMeansPartition] Cluster-{i}: {len(cluster)} nodes") + logger.debug( f"[KMeansPartition] Clusters after filtering (>{min_cluster_size}): {len(filtered_clusters)}" ) + seen_ids = set() + duplicate_ids = set() + + for i, cluster in enumerate(raw_clusters): + ids = [n.id for n in cluster] + mems = [n.memory[:80].replace("\n", " ") + "..." for n in cluster] + logger.debug(f"[Cluster-{i}] size={len(cluster)}") + for nid, mem in zip(ids, mems, strict=False): + logger.debug(f" - id={nid} | mem={mem}") + if nid in seen_ids: + duplicate_ids.add(nid) + else: + seen_ids.add(nid) + if duplicate_ids: + logger.warning( + f"[KMeansPartition] Node ids assigned to multiple clusters: {sorted(duplicate_ids)}" + ) + return filtered_clusters def _summarize_cluster(self, cluster_nodes: list[GraphDBNode], scope: str) -> GraphDBNode: @@ -426,12 +484,30 @@ def _summarize_cluster(self, cluster_nodes: list[GraphDBNode], scope: str) -> Gr if not cluster_nodes: raise ValueError("Cluster nodes cannot be empty.") - memories_items_text = "\n\n".join( - [ - f"{i}. key: {n.metadata.key}\nvalue: {n.memory}\nsummary:{n.metadata.background}" - for i, n in enumerate(cluster_nodes) - ] - ) + memories_items_text = "" + for i, n in enumerate(cluster_nodes): + # Build raw dialogue excerpt + # We won't hard-cut mid-sentence. We'll collect turns until ~1500 chars, then stop before the turn that would exceed the budget (see the standalone sketch below).
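# --- Editor's aside (illustrative, not part of the patch): the inlined
# excerpt-budget logic below can be read as this standalone helper. The
# function name, signature, and default budget are assumptions; the patch
# applies the same idea inline over n.metadata.sources.
def build_dialogue_excerpt(turns: list[tuple[str, str]], budget: int = 1500) -> str:
    """Join role-tagged turns until adding one more would exceed the budget."""
    parts: list[str] = []
    used = 0
    for role, content in turns:
        turn_text = f'{role}: "{content or ""}"'
        if used + len(turn_text) > budget:
            parts.append("...")  # mark truncation only when turns were dropped
            break
        parts.append(turn_text)
        used += len(turn_text)
    return "\n".join(parts)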
+            excerpt_parts = [] + current_len = 0 + truncated = False + for source_j in n.metadata.sources: + turn_text = f'{source_j.role}: "{source_j.content_safe}"' + # if adding this turn blows us past ~1500 chars, break BEFORE adding + if current_len + len(turn_text) > 1500: + truncated = True + break + excerpt_parts.append(turn_text) + current_len += len(turn_text) + if truncated: + excerpt_parts.append("...") # only signal truncation when turns were actually dropped + raw_dialogue_excerpt = "\n".join(excerpt_parts) + + mem_i = ( + f"\nChild Memory {i}:\n" + f"- canonical_value: {n.memory}\n" + f"- user_summary: {n.metadata.background}\n" + f"- raw_dialogue_excerpt:\n{raw_dialogue_excerpt if raw_dialogue_excerpt else '(none)'}\n" + ) + + memories_items_text += mem_i # Build prompt prompt = REORGANIZE_PROMPT.replace("{memory_items_text}", memories_items_text) diff --git a/src/memos/templates/tree_reorganize_prompts.py b/src/memos/templates/tree_reorganize_prompts.py index 086f59a1e..88730b2b5 100644 --- a/src/memos/templates/tree_reorganize_prompts.py +++ b/src/memos/templates/tree_reorganize_prompts.py @@ -1,40 +1,111 @@ -REORGANIZE_PROMPT = """You are a memory clustering and summarization expert. +REORGANIZE_PROMPT = """You are a memory consolidation and summarization expert. -Given the following child memory items: +You will receive a set of child memories that have already been clustered together. These child memories all belong to the same ongoing life thread for the user — the same situation, goal, or period of focus. -{memory_items_text} +Your job is to generate one parent memory node for this life thread. -Please perform: -1. Identify information that reflects user's experiences, beliefs, concerns, decisions, plans, or reactions — including meaningful input from assistant that user acknowledged or responded to. -2. Resolve all time, person, and event references clearly: - - Convert relative time expressions (e.g., “yesterday,” “next Friday”) into absolute dates using the message timestamp if possible. - - Clearly distinguish between event time and message time. - - If uncertainty exists, state it explicitly (e.g., “around June 2025,” “exact date unclear”). - - Include specific locations if mentioned. - - Resolve all pronouns, aliases, and ambiguous references into full names or identities. - - Disambiguate people with the same name if applicable. -3. Always write from a third-person perspective, referring to user as -"The user" or by name if name mentioned, rather than using first-person ("I", "me", "my"). -For example, write "The user felt exhausted..." instead of "I felt exhausted...". -4. Do not omit any information that user is likely to remember. - - Include all key experiences, thoughts, emotional responses, and plans — even if they seem minor. - - Prioritize completeness and fidelity over conciseness. - - Do not generalize or skip details that could be personally meaningful to user. -5. Summarize all child memory items into one memory item. +This parent node will sit above all the child memories. It should read like a concise outline of what this whole thread is about: what the user was working on, why it mattered, and roughly when it was happening. -Language rules: -- The `key`, `value`, `tags`, `summary` fields must match the mostly used language of the input memory items. **如果输入是中文,请输出中文** -- Keep `memory_type` in English. +Input format: +Each child memory will appear in the following structure: -Return valid JSON: +Child Memory X: { - "key": <concise title of the summarized memory>, - "memory_type": <"LongTermMemory" or "UserMemory">, - "value": <detailed, self-contained memory statement>, - "tags": <list of relevant thematic keywords>, - "summary": <natural paragraph summarizing the memories above> +- canonical_value: A factual description of what the user asked, did, planned, or cared about (time, entity, need).
+ - user_summary: A higher-level narrative summary, which may contain interpretation. +- raw_dialogue_excerpt: Short excerpts from the real conversation between the user and the assistant. This is the evidence of what the user actually said, committed to, or felt. + +Evidence priority (this is critical): +1. Treat raw_dialogue_excerpt as the highest-fidelity source of the user's actual intent, feelings, concerns, plans, or commitments. +2. Use canonical_value to bring in clear factual context: dates, places, roles, objects of interest. +3. Use user_summary only to help you recognize that these moments are part of the same thread. Do NOT import personality claims, value judgments, or motivations from user_summary unless they are also supported by raw_dialogue_excerpt or canonical_value. + +Do NOT invent new intentions, emotions, commitments, or timelines that are not supported by the provided evidence. + +Your output must follow these rules: + +1. Capture the throughline, not every step: + - What was the sustained situation, goal, or focus across these memories? + - Over what approximate time period did this happen? Use clear absolute timing if available (e.g. "early March 2025"). If timing is unclear, say "timeframe unclear." + - Which key places, roles, people, or assets keep showing up in this thread? (e.g. a Berlin conference, the user's manager Elena, the user's injured knee, house hunting in Oakland) + - What recurring motivation or concern did the user express? (e.g. wanting to perform well without sounding too salesy; wanting to protect their knee without losing training progress) + +2. Stay high-level, not chronological: + - Do NOT dump every detail from each child memory. + - Do NOT list every piece of advice the assistant gave. + - Do NOT regurgitate every number or spec. + - Instead, in 2–5 sentences, describe what this thread is about, why it mattered to the user, and the general timing/context. + +3. Be strictly factual: + - Only include statements supported by raw_dialogue_excerpt or clearly stated in canonical_value. + - If the user is “planning to,” “trying to,” or “considering,” say exactly that. Do not upgrade it to “the user has done.” + - If timing is fuzzy, acknowledge that (“timeframe unclear”). + +4. Tone and perspective: + - Write in third-person. Refer to the user as “The user” (or by their explicit name if provided). Never use first-person (“I,” “my”). + - Use a neutral, descriptive tone. This is not marketing copy and not an emotional diary. + - The output language must match the dominant language of the child memories. If the child memories are mostly English, write in English. 如果输入主要是中文,就用中文。 + - Do not use bullet points. + +Output format (must be strictly valid JSON): { + "key": <short, human-memorable title of the life thread>, + "memory_type": "LongTermMemory", + "value": <2–5 sentence narrative of the thread>, + "tags": <list of short retrieval keywords> +} + +Definitions: +- `key`: This is the title of the life thread. It should sound like something the user would remember later (e.g. "Preparing for the Berlin security talk (March 2025)") rather than something like "Q1 External Stakeholder Communications Enablement." +- `value`: This is the concise narrative of what was going on, why it mattered, and when. +- `tags`: Retrieval hooks for later.
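# --- Editor's aside (illustrative, not part of the prompt or the patch): a
# minimal sketch of how a consumer might enforce the JSON contract above.
# The helper name `validate_parent_payload` is hypothetical; the pipeline
# itself parses LLM replies with its own _parse_json_result helper.
import json

REQUIRED_KEYS = {"key", "memory_type", "value", "tags"}

def validate_parent_payload(raw_reply: str) -> dict:
    """Parse an LLM reply and check the parent-node output format."""
    payload = json.loads(raw_reply)
    missing = REQUIRED_KEYS - payload.keys()
    if missing:
        raise ValueError(f"reply missing keys: {sorted(missing)}")
    if payload["memory_type"] != "LongTermMemory":
        raise ValueError("memory_type must be the literal 'LongTermMemory'")
    if not isinstance(payload["tags"], list):
        raise ValueError("tags must be a JSON list")
    return payload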
+ +======================== +EXAMPLE +======================== + +Example input sub-cluster (3 items): +Child Memory 0: +- canonical_value: On March 2, 2025, the user said they were nervous about giving a talk in Berlin next week and asked for help cleaning up their presentation slides. +- user_summary: The user was preparing to speak at a conference in Berlin and wanted the presentation to feel confident and professional. +- raw_dialogue_excerpt: +user: "I'm giving a talk in Berlin next week and I'm honestly nervous." +user: "Can you help me clean up my slides so I don't sound like I'm just selling?" +assistant: "You mentioned your manager Elena wants you to highlight the product's security roadmap." + +Child Memory 1: +- canonical_value: The user said their manager Elena wanted them to highlight the product's security roadmap in that Berlin talk, and the user was worried about sounding too 'salesy.' +- user_summary: The user wanted to come across as credible, not like pure marketing. +- raw_dialogue_excerpt: +user: "Elena wants me to talk about the security roadmap, but I don't want to sound like a salesperson." + +Child Memory 2: +- canonical_value: The user asked what clothes would look professional but still comfortable under stage lighting at the Berlin conference. +- user_summary: The user was trying to present well on stage. +- raw_dialogue_excerpt: +user: "What should I wear on stage so I look professional but I'm not dying under the lights?" + +Correct output JSON: + +{ + "key": "Preparing for the Berlin security talk (March 2025)", + "memory_type": "LongTermMemory", + "value": "In early March 2025, The user was preparing to present at a conference in Berlin and felt anxious about performing well. The user asked for help refining their slides and mentioned that their manager Elena wanted the presentation to emphasize the product's security roadmap, but the user did not want the talk to sound overly salesy. The user also asked about what to wear on stage so they would look professional while staying comfortable under the conference lighting.", + "tags": ["Berlin talk prep", "manager Elena", "security roadmap", "presentation anxiety", "stage presence", "March 2025"] } +Why this is correct: +- It captures the ongoing thread (preparing for the Berlin conference talk). +- It states the approximate timeframe ("early March 2025"). +- It mentions the key person (manager Elena) and the main concern (sound credible, not salesy). +- It includes the performance/appearance angle (slides, clothing under lights). +- It keeps third-person (“The user”) and doesn’t invent anything that wasn’t in the evidence. +- It is an outline-style summary, not a blow-by-blow timeline. + +======================== + +Sub-cluster input: +{memory_items_text} + """ DOC_REORGANIZE_PROMPT = """You are a document summarization and knowledge extraction expert. @@ -74,36 +145,97 @@ """ - LOCAL_SUBCLUSTER_PROMPT = """You are a memory organization expert. -You are given a cluster of memory items, each with an ID and content. -Your task is to divide these into smaller, semantically meaningful sub-clusters. +You will receive a batch of memory items from the same user. Each item has an ID and some content. + +Your task is to group these memory items into sub-clusters. Each sub-cluster should represent one coherent "life thread" the user was actively dealing with during a specific period, in a specific context, for a specific goal. 
+ +Definition of a sub-cluster / life thread: +- A sub-cluster is a set of memories that clearly belong to the same ongoing situation, project, or goal in the user's life. +- The stronger these signals are, the more likely the items belong together: + - They happen in the same general time window (same day / same few days / same period). + - They occur in the same context (e.g. preparing for a conference trip, rehabbing an injury, onboarding into a new manager role). + - They repeatedly mention the same people or entities (e.g. the user's manager Elena, the user's dog Milo, a real estate agent). + - They reflect the same motivation or aim (e.g. “get ready to present at a conference,” “protect my knee while staying in shape,” “figure out how to lead a new team,” “understand home-buying budget”). + +Hard constraints: +- Do NOT merge memories that clearly come from different life threads, even if they share similar words or emotions. + - Do NOT merge “preparing to present in Berlin at a security conference” with “doing physical therapy after a knee injury.” They are different goals. + - Do NOT merge “learning to manage a new team at work” with “researching mortgage / down payment for a house in Oakland.” These are separate parts of life. +- Each sub-cluster must contain 2–10 items. +- If an item cannot be placed into any multi-item sub-cluster without breaking the rules above, treat it as a singleton. +- A singleton means: this item currently stands alone in its own thread. Do NOT force unrelated items together just to avoid a singleton. +- Each item ID must appear exactly once: either in one sub-cluster or in `singletons`. No duplicates. + +Output requirements: +- You must return strictly valid JSON. +- For each sub-cluster, `key` must be a short, natural title that sounds like how a human would label that period of their life — not corporate jargon. + - Good: "Getting ready to present in Berlin (March 2025)" + - Bad: "Q2 International Presentation Enablement Workstream" +- The language of each `key` should match the dominant language of that sub-cluster. If the sub-cluster is mostly in Chinese, use Chinese. If it's English, use English. + +Return format (must be followed exactly): +{ + "clusters": [ + { + "ids": ["<id>", "<id>", ...], + "key": "<short natural title for this life thread>" + }, + ... + ], + "singletons": [ + { + "id": "<id>", + "reason": "<brief reason this item stands alone>" + }, + ... + ] +} -Instructions: -- Identify natural topics by analyzing common time, place, people, and event elements. -- Each sub-cluster must reflect a coherent theme that helps retrieval. -- Each sub-cluster should have 2–10 items. Discard singletons. -- Each item ID must appear in exactly one sub-cluster or be discarded. No duplicates are allowed. -- All IDs in the output must be from the provided Memory items. -- Return strictly valid JSON only. +======================== +EXAMPLE +======================== -Example: If you have items about a project across multiple phases, group them by milestone, team, or event. +Example input memory items (illustrative): -Language rules: -- The `key` fields must match the mostly used language of the clustered memories. **如果输入是中文,请输出中文** +- ID: A1 | Value: On March 2, 2025, the user said they were nervous about giving a talk in Berlin next week and asked for help cleaning up their presentation slides. +- ID: A2 | Value: The user said their manager Elena wanted them to highlight the product's security roadmap in that Berlin talk, and the user was worried about sounding too "salesy."
+- ID: A3 | Value: The user asked what clothes would look professional but still comfortable under stage lighting at the Berlin conference. +- ID: B1 | Value: The user said they injured their left knee while running stairs on February 28, 2025, and that a doctor told them to avoid high-impact exercise for at least two weeks. +- ID: B2 | Value: The user asked for low-impact leg strengthening exercises that wouldn't aggravate the injured knee and said they were worried about losing training progress. +- ID: C1 | Value: The user said they started casually browsing houses in Oakland and wanted to understand how much down payment they'd need for a $900k place. + +Correct output JSON for this example: -Return valid JSON: { "clusters": [ { - "ids": ["", "", ...], - "key": "" + "ids": ["A1", "A2", "A3"], + "key": "Getting ready to present in Berlin (March 2025)" }, - ... + { + "ids": ["B1", "B2"], + "key": "Recovering from the knee injury" + } + ], + "singletons": [ + { + "id": "C1", + "reason": "House hunting / down payment research currently has no other related items" + } ] } +Explanation: +- A1/A2/A3 all describe the same thread: preparing to give a talk in Berlin. Same event, same time range, same anxiety about performance and tone. +- B1/B2 are about rehabbing a knee injury and staying in shape without making it worse. +- C1 is about browsing houses / down payment planning in Oakland. That is unrelated to conference prep or injury recovery, so it is a singleton. +- We did NOT force C1 into any cluster. +- We did NOT merge the Berlin prep with the knee rehab just because both involve “worry,” since they are different motivations and contexts. + +======================== + Memory items: {joined_scene} """ From b09552f6d538c370279b56d13bbecce126dacd22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 28 Oct 2025 15:30:23 +0800 Subject: [PATCH 30/32] fix: mem-reader --- src/memos/mem_reader/simple_struct.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/memos/mem_reader/simple_struct.py b/src/memos/mem_reader/simple_struct.py index 9f5eb9832..5c922f3d0 100644 --- a/src/memos/mem_reader/simple_struct.py +++ b/src/memos/mem_reader/simple_struct.py @@ -51,7 +51,8 @@ _ENC = tiktoken.get_encoding("cl100k_base") def _count_tokens_text(s: str) -> int: - return len(_ENC.encode(s or "")) + # allow special tokens like <|endoftext|> instead of raising ValueError + return len(_ENC.encode(s or "", disallowed_special=())) except Exception: # Heuristic fallback: zh chars ~1 token, others ~1 token per ~4 chars def _count_tokens_text(s: str) -> int: @@ -247,7 +248,15 @@ def _iter_chat_windows(self, scene_data_info, max_tokens=None, overlap=200): cur_text = "".join(buf) buf.append(line) - sources.append({"type": "chat", "index": idx, "role": role, "chat_time": chat_time}) + sources.append( + { + "type": "chat", + "index": idx, + "role": role, + "content": content, + "chat_time": chat_time, + } + ) cur_text = "".join(buf) if buf: From 3ad9c8d3a245fa06acc678e3604e0b1158bd2d68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 28 Oct 2025 15:34:01 +0800 Subject: [PATCH 31/32] feat: update reorganize scheduler --- src/memos/mem_scheduler/base_scheduler.py | 136 ------------------- src/memos/mem_scheduler/general_scheduler.py | 109 +++++++++------ src/memos/memories/textual/item.py | 5 + 3 files changed, 74 insertions(+), 176 deletions(-) diff --git a/src/memos/mem_scheduler/base_scheduler.py 
b/src/memos/mem_scheduler/base_scheduler.py index 1e8b042b1..f57cf21c9 100644 --- a/src/memos/mem_scheduler/base_scheduler.py +++ b/src/memos/mem_scheduler/base_scheduler.py @@ -736,139 +736,3 @@ def _cleanup_queues(self) -> None: self._web_log_message_queue.get_nowait() except queue.Empty: pass - - def mem_scheduler_wait( - self, timeout: float = 180.0, poll: float = 0.1, log_every: float = 0.01 - ) -> bool: - """ - Uses EWMA throughput, detects leaked `unfinished_tasks`, and waits for dispatcher. - """ - deadline = time.monotonic() + timeout - - # --- helpers (local, no external deps) --- - def _unfinished() -> int: - """Prefer `unfinished_tasks`; fallback to `qsize()`.""" - try: - u = getattr(self.memos_message_queue, "unfinished_tasks", None) - if u is not None: - return int(u) - except Exception: - pass - try: - return int(self.memos_message_queue.qsize()) - except Exception: - return 0 - - def _fmt_eta(seconds: float | None) -> str: - """Format seconds to human-readable string.""" - if seconds is None or seconds != seconds or seconds == float("inf"): - return "unknown" - s = max(0, int(seconds)) - h, s = divmod(s, 3600) - m, s = divmod(s, 60) - if h > 0: - return f"{h:d}h{m:02d}m{s:02d}s" - if m > 0: - return f"{m:d}m{s:02d}s" - return f"{s:d}s" - - # --- EWMA throughput state (tasks/s) --- - alpha = 0.3 - rate = 0.0 - last_t = None # type: float | None - last_done = 0 - - # --- dynamic totals & stuck detection --- - init_unfinished = _unfinished() - done_total = 0 - last_unfinished = None - stuck_ticks = 0 - next_log = 0.0 - - while True: - # 1) read counters - curr_unfinished = _unfinished() - try: - qsz = int(self.memos_message_queue.qsize()) - except Exception: - qsz = -1 - - pend = run = 0 - stats_fn = getattr(self.dispatcher, "stats", None) - if self.enable_parallel_dispatch and self.dispatcher is not None and callable(stats_fn): - try: - st = ( - stats_fn() - ) # expected: {'pending':int,'running':int,'done':int?,'rate':float?} - pend = int(st.get("pending", 0)) - run = int(st.get("running", 0)) - except Exception: - pass - - # 2) dynamic total (allows new tasks queued while waiting) - total_now = max(init_unfinished, done_total + curr_unfinished) - done_total = max(0, total_now - curr_unfinished) - - # 3) update EWMA throughput - now = time.monotonic() - if last_t is None: - last_t = now - else: - dt = max(1e-6, now - last_t) - dc = max(0, done_total - last_done) - inst = dc / dt - rate = inst if rate == 0.0 else alpha * inst + (1 - alpha) * rate - last_t = now - last_done = done_total - - eta = None if rate <= 1e-9 else (curr_unfinished / rate) - - # 4) progress log (throttled) - if now >= next_log: - print( - f"[mem_scheduler_wait] remaining≈{curr_unfinished} | throughput≈{rate:.2f} msg/s | ETA≈{_fmt_eta(eta)} " - f"| qsize={qsz} pending={pend} running={run}" - ) - next_log = now + max(0.2, log_every) - - # 5) exit / stuck detection - idle_dispatcher = ( - (pend == 0 and run == 0) - if (self.enable_parallel_dispatch and self.dispatcher is not None) - else True - ) - if curr_unfinished == 0: - break - if curr_unfinished > 0 and qsz == 0 and idle_dispatcher: - if last_unfinished == curr_unfinished: - stuck_ticks += 1 - else: - stuck_ticks = 0 - else: - stuck_ticks = 0 - last_unfinished = curr_unfinished - - if stuck_ticks >= 3: - logger.warning( - "mem_scheduler_wait: detected leaked 'unfinished_tasks' -> treating queue as drained" - ) - break - - if now >= deadline: - logger.warning("mem_scheduler_wait: queue did not drain before timeout") - return False - - time.sleep(poll) 
- - # 6) wait dispatcher (second stage) - remaining = max(0.0, deadline - time.monotonic()) - if self.enable_parallel_dispatch and self.dispatcher is not None: - try: - ok = self.dispatcher.join(timeout=remaining if remaining > 0 else 0) - except TypeError: - ok = self.dispatcher.join() - if not ok: - logger.warning("mem_scheduler_wait: dispatcher did not complete before timeout") - return False - - return True diff --git a/src/memos/mem_scheduler/general_scheduler.py b/src/memos/mem_scheduler/general_scheduler.py index 1505be79b..92e9b9d4a 100644 --- a/src/memos/mem_scheduler/general_scheduler.py +++ b/src/memos/mem_scheduler/general_scheduler.py @@ -351,6 +351,8 @@ def _process_memories_with_reader( flattened_memories = [] for memory_list in processed_memories: flattened_memories.extend(memory_list) + for mem in memory_list: + logger.debug(f"Added processed mem-reader memory {mem.id}: {mem.memory}") logger.info(f"mem_reader processed {len(flattened_memories)} enhanced memories") @@ -367,6 +369,10 @@ ): # Dispatcher exists but organize not enabled; skip enqueue. pass + elif not getattr( + text_mem.memory_manager.reorganizer, "is_reorganize", True + ): + pass else: message_item = ScheduleMessageItem( user_id=user_id, @@ -440,6 +446,15 @@ def _get_reorg_lock(self, mem_cube_id: str) -> threading.Lock: self._reorg_locks[mem_cube_id] = lock return lock + def _get_reorg_state(self, mem_cube_id: str): + if not hasattr(self, "_reorg_state"): + self._reorg_state = {} + st = self._reorg_state.get(mem_cube_id) + if st is None: + st = {"running": False, "rerun_requested": False} + self._reorg_state[mem_cube_id] = st + return st + def _run_reorganize_singleflight( self, mem_cube: GeneralMemCube, @@ -451,54 +466,68 @@ If `scopes` is None, run both LongTermMemory and UserMemory (safe default). """ lock = self._get_reorg_lock(mem_cube_id) - if not lock.acquire(blocking=False): - logger.info( - f"[Reorganize] Another task is already running for mem_cube_id={mem_cube_id}; skipping this trigger." - ) - return + state = self._get_reorg_state(mem_cube_id) - try: - text_mem = mem_cube.text_mem - if not isinstance(text_mem, TreeTextMemory): - logger.error( - f"[Reorganize] Expected TreeTextMemory but got {type(text_mem).__name__} for mem_cube_id={mem_cube_id}" + with lock: + if state["running"]: + state["rerun_requested"] = True + logger.info( + f"[Reorganize] Already running for {mem_cube_id}; marking trailing rerun and skipping." ) return + state["running"] = True + state["rerun_requested"] = False - # Fetch reorganizer from the attached memory manager - reorganizer = text_mem.memory_manager.reorganizer - if not reorganizer or not getattr(reorganizer, "is_reorganize", True): - logger.debug( - f"[Reorganize] Reorganizer disabled or missing for mem_cube_id={mem_cube_id}; skip." - ) - return + try: + logger.debug(f"[Reorganize] No reorganize pass in flight for {mem_cube_id}; starting one.") + # ===== Run First Turn ===== + self._run_reorganize_once(mem_cube, mem_cube_id, scopes) - # Optional: also respect internal optimizing flags if present - try: - if any(getattr(reorganizer, "_is_optimizing", {}).values()): - logger.debug( - f"[Reorganize] Reorganizer busy (internal flag) for mem_cube_id={mem_cube_id}; skip." - ) - return - except Exception: - # If structure differs, just proceed; locking still guarantees single-flight per cube.
- pass + + # ===== Run Trailing Turn ===== + do_trailing = False + with lock: + if state["rerun_requested"]: + state["rerun_requested"] = False + do_trailing = True - run_scopes = scopes or ["LongTermMemory", "UserMemory"] - for scope in run_scopes: - try: - logger.info( - f"[Reorganize] Start optimize_structure(scope={scope}) for mem_cube_id={mem_cube_id}" - ) - reorganizer.optimize_structure(scope=scope) - except Exception as e: - logger.warning( - f"[Reorganize] optimize_structure failed for scope={scope}, mem_cube_id={mem_cube_id}: {e}", - exc_info=True, - ) + if do_trailing: + logger.info(f"[Reorganize] Running single trailing pass for {mem_cube_id}.") + self._run_reorganize_once(mem_cube, mem_cube_id, scopes) finally: - lock.release() + with lock: + state["running"] = False + + def _run_reorganize_once( + self, mem_cube: GeneralMemCube, mem_cube_id: str, scopes: list[str] | None + ): + logger.debug(f"[Reorganize] Starting reorganize pass for mem_cube_id={mem_cube_id}.") + text_mem = mem_cube.text_mem + if not isinstance(text_mem, TreeTextMemory): + logger.error( + f"[Reorganize] Expected TreeTextMemory but got {type(text_mem).__name__} for mem_cube_id={mem_cube_id}" + ) + return + + reorganizer = text_mem.memory_manager.reorganizer + if not reorganizer or not getattr(reorganizer, "is_reorganize", True): + logger.debug( + f"[Reorganize] Reorganizer disabled or missing for mem_cube_id={mem_cube_id}; skip." + ) + return + + run_scopes = scopes or ["LongTermMemory", "UserMemory"] + for scope in run_scopes: + logger.info( + f"[Reorganize] Start optimize_structure(scope={scope}) for mem_cube_id={mem_cube_id}" + ) + try: + reorganizer.optimize_structure(scope=scope) + except Exception as e: + logger.warning( + f"[Reorganize] optimize_structure failed for scope={scope}, mem_cube_id={mem_cube_id}: {e}", + exc_info=True, + ) def _process_memories_with_reorganize( self, diff --git a/src/memos/memories/textual/item.py b/src/memos/memories/textual/item.py index 2da283d47..654b44055 100644 --- a/src/memos/memories/textual/item.py +++ b/src/memos/memories/textual/item.py @@ -42,6 +42,11 @@ class SourceMessage(BaseModel): model_config = ConfigDict(extra="allow") + @property + def content_safe(self) -> str: + """Always return a string, falling back to '' if content is None.""" + return self.content or "" + class TextualMemoryMetadata(BaseModel): """Metadata for a memory item. From bd47a4977760836d538daed9c23c24b9736b5a69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=AD=E9=98=B3=E9=98=B3?= Date: Tue, 28 Oct 2025 16:11:30 +0800 Subject: [PATCH 32/32] fix: core.py --- src/memos/mem_os/core.py | 80 ++++++++++------------------------------ 1 file changed, 19 insertions(+), 61 deletions(-) diff --git a/src/memos/mem_os/core.py b/src/memos/mem_os/core.py index f7132f704..736b04b74 100644 --- a/src/memos/mem_os/core.py +++ b/src/memos/mem_os/core.py @@ -721,67 +721,6 @@ def add( "Mem-Scheduler must be running when using asynchronous memory adding."
) logger.debug(f"Mem-reader mode is: {sync_mode}") - time_start_1 = time.time() - if ( - (messages is not None) - and self.config.enable_textual_memory - and self.mem_cubes[mem_cube_id].text_mem - ): - logger.info( - f"time add: messages is not None and enable_textual_memory and text_mem is not None time user_id: {target_user_id} time is: {time.time() - time_start_1}" - ) - - if self.mem_cubes[mem_cube_id].config.text_mem.backend != "tree_text": - add_memory = [] - metadata = TextualMemoryMetadata( - user_id=target_user_id, session_id=target_session_id, source="conversation" - ) - for message in messages: - add_memory.append( - TextualMemoryItem(memory=message["content"], metadata=metadata) - ) - self.mem_cubes[mem_cube_id].text_mem.add(add_memory) - else: - messages_list = [messages] - time_start_2 = time.time() - memories = self.mem_reader.get_memory( - messages_list, - type="chat", - info={"user_id": target_user_id, "session_id": target_session_id}, - mode="fast" if sync_mode == "async" else "fine", - ) - logger.info( - f"time add: get mem_reader time user_id: {target_user_id} time is: {time.time() - time_start_2}" - ) - memories_flatten = [m for m_list in memories for m in m_list] - mem_ids: list[str] = self.mem_cubes[mem_cube_id].text_mem.add(memories_flatten) - logger.info( - f"Added memory user {target_user_id} to memcube {mem_cube_id}: {mem_ids}" - ) - # submit messages for scheduler - if self.enable_mem_scheduler and self.mem_scheduler is not None: - mem_cube = self.mem_cubes[mem_cube_id] - if sync_mode == "async": - message_item = ScheduleMessageItem( - user_id=target_user_id, - mem_cube_id=mem_cube_id, - mem_cube=mem_cube, - label=MEM_READ_LABEL, - content=json.dumps(mem_ids), - timestamp=datetime.utcnow(), - ) - self.mem_scheduler.submit_messages(messages=[message_item]) - - elif sync_mode == "sync": - message_item = ScheduleMessageItem( - user_id=user_id, - mem_cube_id=mem_cube_id, - mem_cube=mem_cube, - label=MEM_ORGANIZE_LABEL, - content=json.dumps(mem_ids), - timestamp=datetime.utcnow(), - ) - self.mem_scheduler.submit_messages(messages=[message_item]) def process_textual_memory(): if ( @@ -825,6 +764,25 @@ def process_textual_memory(): timestamp=datetime.utcnow(), ) self.mem_scheduler.submit_messages(messages=[message_item]) + elif sync_mode == "sync": + message_item = ScheduleMessageItem( + user_id=user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + label=MEM_ORGANIZE_LABEL, + content=json.dumps(mem_ids), + timestamp=datetime.utcnow(), + ) + self.mem_scheduler.submit_messages(messages=[message_item]) + message_item = ScheduleMessageItem( + user_id=target_user_id, + mem_cube_id=mem_cube_id, + mem_cube=mem_cube, + label=ADD_LABEL, + content=json.dumps(mem_ids), + timestamp=datetime.utcnow(), + ) + self.mem_scheduler.submit_messages(messages=[message_item]) def process_preference_memory(): if (
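Editor's aside: the sync/async branches above depend on the single-flight pattern that PATCH 31 introduces for the reorganize scheduler: at most one reorganize pass runs per mem cube, and triggers that arrive mid-run coalesce into exactly one trailing pass. A minimal standalone sketch of that pattern follows; the class and method names are hypothetical and simplified from _run_reorganize_singleflight.

import threading
from collections.abc import Callable

class SingleFlight:
    """Run a job at most once concurrently; coalesce triggers that arrive
    mid-run into a single trailing re-run once the current pass finishes."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._running = False
        self._rerun_requested = False

    def trigger(self, job: Callable[[], None]) -> None:
        with self._lock:
            if self._running:
                self._rerun_requested = True  # remember that fresh work arrived
                return
            self._running = True
            self._rerun_requested = False
        try:
            job()  # first pass
            with self._lock:
                do_trailing = self._rerun_requested
                self._rerun_requested = False
            if do_trailing:
                job()  # one trailing pass covers all coalesced triggers
        finally:
            with self._lock:
                self._running = False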