diff --git a/python/pyproject.toml b/python/pyproject.toml
index 23d3cb0d0..b7af3d252 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -31,6 +31,8 @@ dependencies = [
"ccxt>=4.5.15",
"baostock>=0.8.9",
"func-timeout>=4.3.5",
+ "langchain-openai>=1.1.1",
+ "langgraph>=1.0.4",
]
[project.optional-dependencies]
diff --git a/python/uv.lock b/python/uv.lock
index 8f5fe746e..24fca98bc 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -1,5 +1,5 @@
version = 1
-revision = 2
+revision = 3
requires-python = ">=3.12"
resolution-markers = [
"python_full_version >= '3.14'",
@@ -1456,12 +1456,33 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241", size = 308396, upload-time = "2025-08-27T12:15:45.188Z" },
]
+[[package]]
+name = "jsonpatch"
+version = "1.33"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jsonpointer" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" },
+]
+
[[package]]
name = "jsonpath"
version = "0.82.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cf/a1/693351acd0a9edca4de9153372a65e75398898ea7f8a5c722ab00f464929/jsonpath-0.82.2.tar.gz", hash = "sha256:d87ef2bcbcded68ee96bc34c1809b69457ecec9b0c4dd471658a12bd391002d1", size = 10353, upload-time = "2023-08-24T18:57:55.459Z" }
+[[package]]
+name = "jsonpointer"
+version = "3.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" },
+]
+
[[package]]
name = "jsonschema"
version = "4.25.1"
@@ -1559,6 +1580,39 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a9/0a/36d753b01198b0590eb45e283b07d54feaaab89d528cf7bb048eeeaf2dce/lancedb-0.25.2-cp39-abi3-win_amd64.whl", hash = "sha256:9bd990f27667d37cec0f41686e9c83e8051bb45cb4b6d48355fcc9f8e2c6b0f7", size = 41081428, upload-time = "2025-10-08T18:59:54.832Z" },
]
+[[package]]
+name = "langchain-core"
+version = "1.1.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jsonpatch" },
+ { name = "langsmith" },
+ { name = "packaging" },
+ { name = "pydantic" },
+ { name = "pyyaml" },
+ { name = "tenacity" },
+ { name = "typing-extensions" },
+ { name = "uuid-utils" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/67/8d/99652acb7feaa4e16c9162429bc7a446f04749ef438aa02fce74b4319a00/langchain_core-1.1.2.tar.gz", hash = "sha256:75456c5cc10c3b53b80488bf5c6a4bcc3447b53e011533a8744bb0638b85dd78", size = 802560, upload-time = "2025-12-08T15:28:17.689Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fb/68/2caf612e4b5e25d7938c96809b7ccbafb5906958bcad8c18d9211f092679/langchain_core-1.1.2-py3-none-any.whl", hash = "sha256:74dfd4dcc10a290e3701a64e35e0bea3f68420f5b7527820ced9414f5b2dc281", size = 475847, upload-time = "2025-12-08T15:28:16.467Z" },
+]
+
+[[package]]
+name = "langchain-openai"
+version = "1.1.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "langchain-core" },
+ { name = "openai" },
+ { name = "tiktoken" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/05/38/c6517187ea5f0db6d682083116a020409b01eef0547b4330542c117cd25d/langchain_openai-1.1.1.tar.gz", hash = "sha256:72aa7262854104e0b2794522a90c49353c79d0132caa1be27ef253852685d5e7", size = 1037309, upload-time = "2025-12-08T16:17:29.838Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1d/95/d65d3e187cd717baeb62988ae90a995f93564657f67518fb775af4090880/langchain_openai-1.1.1-py3-none-any.whl", hash = "sha256:69b9be37e6ae3372b4d937cb9365cf55c0c59b5f7870e7507cb7d802a8b98b30", size = 84291, upload-time = "2025-12-08T16:17:24.418Z" },
+]
+
[[package]]
name = "langdetect"
version = "1.0.9"
@@ -1568,6 +1622,81 @@ dependencies = [
]
sdist = { url = "https://files.pythonhosted.org/packages/0e/72/a3add0e4eec4eb9e2569554f7c70f4a3c27712f40e3284d483e88094cc0e/langdetect-1.0.9.tar.gz", hash = "sha256:cbc1fef89f8d062739774bd51eda3da3274006b3661d199c2655f6b3f6d605a0", size = 981474, upload-time = "2021-05-07T07:54:13.562Z" }
+[[package]]
+name = "langgraph"
+version = "1.0.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "langchain-core" },
+ { name = "langgraph-checkpoint" },
+ { name = "langgraph-prebuilt" },
+ { name = "langgraph-sdk" },
+ { name = "pydantic" },
+ { name = "xxhash" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d6/3c/af87902d300c1f467165558c8966d8b1e1f896dace271d3f35a410a5c26a/langgraph-1.0.4.tar.gz", hash = "sha256:86d08e25d7244340f59c5200fa69fdd11066aa999b3164b531e2a20036fac156", size = 484397, upload-time = "2025-11-25T20:31:48.608Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/14/52/4eb25a3f60399da34ba34adff1b3e324cf0d87eb7a08cebf1882a9b5e0d5/langgraph-1.0.4-py3-none-any.whl", hash = "sha256:b1a835ceb0a8d69b9db48075e1939e28b1ad70ee23fa3fa8f90149904778bacf", size = 157271, upload-time = "2025-11-25T20:31:47.518Z" },
+]
+
+[[package]]
+name = "langgraph-checkpoint"
+version = "3.0.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "langchain-core" },
+ { name = "ormsgpack" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0f/07/2b1c042fa87d40cf2db5ca27dc4e8dd86f9a0436a10aa4361a8982718ae7/langgraph_checkpoint-3.0.1.tar.gz", hash = "sha256:59222f875f85186a22c494aedc65c4e985a3df27e696e5016ba0b98a5ed2cee0", size = 137785, upload-time = "2025-11-04T21:55:47.774Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/48/e3/616e3a7ff737d98c1bbb5700dd62278914e2a9ded09a79a1fa93cf24ce12/langgraph_checkpoint-3.0.1-py3-none-any.whl", hash = "sha256:9b04a8d0edc0474ce4eaf30c5d731cee38f11ddff50a6177eead95b5c4e4220b", size = 46249, upload-time = "2025-11-04T21:55:46.472Z" },
+]
+
+[[package]]
+name = "langgraph-prebuilt"
+version = "1.0.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "langchain-core" },
+ { name = "langgraph-checkpoint" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/46/f9/54f8891b32159e4542236817aea2ee83de0de18bce28e9bdba08c7f93001/langgraph_prebuilt-1.0.5.tar.gz", hash = "sha256:85802675ad778cc7240fd02d47db1e0b59c0c86d8369447d77ce47623845db2d", size = 144453, upload-time = "2025-11-20T16:47:39.23Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/87/5e/aeba4a5b39fe6e874e0dd003a82da71c7153e671312671a8dacc5cb7c1af/langgraph_prebuilt-1.0.5-py3-none-any.whl", hash = "sha256:22369563e1848862ace53fbc11b027c28dd04a9ac39314633bb95f2a7e258496", size = 35072, upload-time = "2025-11-20T16:47:38.187Z" },
+]
+
+[[package]]
+name = "langgraph-sdk"
+version = "0.2.14"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "httpx" },
+ { name = "orjson" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b9/c4/b37b892a408f0b4753b8ad49529c7b5994abab47005940300ab1af9d8a5c/langgraph_sdk-0.2.14.tar.gz", hash = "sha256:fab3dd713a9c7a9cc46dc4b2eb5e555bd0c07b185cfaf813d61b5356ee40886e", size = 130335, upload-time = "2025-12-06T00:23:31.527Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f9/ff/c4d91a2d28a141a58dc8fea408041aff299f59563d43d0e0f458469e10cb/langgraph_sdk-0.2.14-py3-none-any.whl", hash = "sha256:e01ab9867d3b22d3b4ddd46fc0bab67b7684b25ab784a276684f331ca07efabf", size = 66464, upload-time = "2025-12-06T00:23:30.638Z" },
+]
+
+[[package]]
+name = "langsmith"
+version = "0.4.56"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "httpx" },
+ { name = "orjson", marker = "platform_python_implementation != 'PyPy'" },
+ { name = "packaging" },
+ { name = "pydantic" },
+ { name = "requests" },
+ { name = "requests-toolbelt" },
+ { name = "uuid-utils" },
+ { name = "zstandard" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4b/e0/6d8a07b25a3ac308156707edaeffebbc30b2737bba8a75e65c40908beb94/langsmith-0.4.56.tar.gz", hash = "sha256:c3dc53509972689dbbc24f9ac92a095dcce00f76bb0db03ae385815945572540", size = 991755, upload-time = "2025-12-06T00:15:52.893Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b8/6f/d5f9c4f1e03c91045d3675dc99df0682bc657952ad158c92c1f423de04f4/langsmith-0.4.56-py3-none-any.whl", hash = "sha256:f2c61d3f10210e78f16f77e3115f407d40f562ab00ac8c76927c7dd55b5c17b2", size = 411849, upload-time = "2025-12-06T00:15:50.828Z" },
+]
+
[[package]]
name = "lark"
version = "1.3.0"
@@ -1940,7 +2069,7 @@ wheels = [
[[package]]
name = "openai"
-version = "1.107.0"
+version = "2.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -1952,9 +2081,9 @@ dependencies = [
{ name = "tqdm" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/88/67/d6498de300f83ff57a79cb7aa96ef3bef8d6f070c3ded0f1b5b45442a6bc/openai-1.107.0.tar.gz", hash = "sha256:43e04927584e57d0e9e640ee0077c78baf8150098be96ebd5c512539b6c4e9a4", size = 566056, upload-time = "2025-09-08T19:25:47.604Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/09/48/516290f38745cc1e72856f50e8afed4a7f9ac396a5a18f39e892ab89dfc2/openai-2.9.0.tar.gz", hash = "sha256:b52ec65727fc8f1eed2fbc86c8eac0998900c7ef63aa2eb5c24b69717c56fa5f", size = 608202, upload-time = "2025-12-04T18:15:09.01Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/91/ed/e8a4fd20390f2858b95227c288df8fe0c835f7c77625f7583609161684ba/openai-1.107.0-py3-none-any.whl", hash = "sha256:3dcfa3cbb116bd6924b27913b8da28c4a787379ff60049588547a1013e6d6438", size = 950968, upload-time = "2025-09-08T19:25:45.552Z" },
+ { url = "https://files.pythonhosted.org/packages/59/fd/ae2da789cd923dd033c99b8d544071a827c92046b150db01cfa5cea5b3fd/openai-2.9.0-py3-none-any.whl", hash = "sha256:0d168a490fbb45630ad508a6f3022013c155a68fd708069b6a1a01a5e8f0ffad", size = 1030836, upload-time = "2025-12-04T18:15:07.063Z" },
]
[[package]]
@@ -2018,6 +2147,44 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/28/01/d6b274a0635be0468d4dbd9cafe80c47105937a0d42434e805e67cd2ed8b/orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc", size = 125985, upload-time = "2025-08-26T17:46:16.67Z" },
]
+[[package]]
+name = "ormsgpack"
+version = "1.12.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/6c/67/d5ef41c3b4a94400be801984ef7c7fc9623e1a82b643e74eeec367e7462b/ormsgpack-1.12.0.tar.gz", hash = "sha256:94be818fdbb0285945839b88763b269987787cb2f7ef280cad5d6ec815b7e608", size = 49959, upload-time = "2025-11-04T18:30:10.083Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a2/f2/c1036b2775fcc0cfa5fd618c53bcd3b862ee07298fb627f03af4c7982f84/ormsgpack-1.12.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e0c1e08b64d99076fee155276097489b82cc56e8d5951c03c721a65a32f44494", size = 369538, upload-time = "2025-11-04T18:29:37.125Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/ca/526c4ae02f3cb34621af91bf8282a10d666757c2e0c6ff391ff5d403d607/ormsgpack-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd43bcb299131690b8e0677af172020b2ada8e625169034b42ac0c13adf84aa", size = 195872, upload-time = "2025-11-04T18:29:38.34Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/0f/83bb7968e9715f6a85be53d041b1e6324a05428f56b8b980dac866886871/ormsgpack-1.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0149d595341e22ead340bf281b2995c4cc7dc8d522a6b5f575fe17aa407604", size = 206469, upload-time = "2025-11-04T18:29:39.749Z" },
+ { url = "https://files.pythonhosted.org/packages/02/e3/9e93ca1065f2d4af035804a842b1ff3025bab580c7918239bb225cd1fee2/ormsgpack-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f19a1b27d169deb553c80fd10b589fc2be1fc14cee779fae79fcaf40db04de2b", size = 208273, upload-time = "2025-11-04T18:29:40.769Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/d8/6d6ef901b3a8b8f3ab8836b135a56eb7f66c559003e251d9530bedb12627/ormsgpack-1.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6f28896942d655064940dfe06118b7ce1e3468d051483148bf02c99ec157483a", size = 377839, upload-time = "2025-11-04T18:29:42.092Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/72/fcb704bfa4c2c3a37b647d597cc45a13cffc9d50baac635a9ad620731d29/ormsgpack-1.12.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9396efcfa48b4abbc06e44c5dbc3c4574a8381a80cb4cd01eea15d28b38c554e", size = 471446, upload-time = "2025-11-04T18:29:43.133Z" },
+ { url = "https://files.pythonhosted.org/packages/84/f8/402e4e3eb997c2ee534c99bec4b5bb359c2a1f9edadf043e254a71e11378/ormsgpack-1.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:96586ed537a5fb386a162c4f9f7d8e6f76e07b38a990d50c73f11131e00ff040", size = 381783, upload-time = "2025-11-04T18:29:44.466Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/8d/5897b700360bc00911b70ae5ef1134ee7abf5baa81a92a4be005917d3dfd/ormsgpack-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e70387112fb3870e4844de090014212cdcf1342f5022047aecca01ec7de05d7a", size = 112943, upload-time = "2025-11-04T18:29:45.468Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/44/1e73649f79bb96d6cf9e5bcbac68b6216d238bba80af351c4c0cbcf7ee15/ormsgpack-1.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:d71290a23de5d4829610c42665d816c661ecad8979883f3f06b2e3ab9639962e", size = 106688, upload-time = "2025-11-04T18:29:46.411Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/e8/35f11ce9313111488b26b3035e4cbe55caa27909c0b6c8b5b5cd59f9661e/ormsgpack-1.12.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:766f2f3b512d85cd375b26a8b1329b99843560b50b93d3880718e634ad4a5de5", size = 369574, upload-time = "2025-11-04T18:29:47.431Z" },
+ { url = "https://files.pythonhosted.org/packages/61/b0/77461587f412d4e598d3687bafe23455ed0f26269f44be20252eddaa624e/ormsgpack-1.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84b285b1f3f185aad7da45641b873b30acfd13084cf829cf668c4c6480a81583", size = 195893, upload-time = "2025-11-04T18:29:48.735Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/67/e197ceb04c3b550589e5407fc9fdae10f4e2e2eba5fdac921a269e02e974/ormsgpack-1.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e23604fc79fe110292cb365f4c8232e64e63a34f470538be320feae3921f271b", size = 206503, upload-time = "2025-11-04T18:29:49.99Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/b1/7fa8ba82a25cef678983c7976f85edeef5014f5c26495f338258e6a3cf1c/ormsgpack-1.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc32b156c113a0fae2975051417d8d9a7a5247c34b2d7239410c46b75ce9348a", size = 208257, upload-time = "2025-11-04T18:29:51.007Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/b1/759e999390000d2589e6d0797f7265e6ec28378547075d28d3736248ab63/ormsgpack-1.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:94ac500dd10c20fa8b8a23bc55606250bfe711bf9716828d9f3d44dfd1f25668", size = 377852, upload-time = "2025-11-04T18:29:52.103Z" },
+ { url = "https://files.pythonhosted.org/packages/51/e7/0af737c94272494d9d84a3c29cc42c973ef7fd2342917020906596db863c/ormsgpack-1.12.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:c5201ff7ec24f721f813a182885a17064cffdbe46b2412685a52e6374a872c8f", size = 471456, upload-time = "2025-11-04T18:29:53.336Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/ba/c81f0aa4f19fbf457213395945b672e6fde3ce777e3587456e7f0fca2147/ormsgpack-1.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a9740bb3839c9368aacae1cbcfc474ee6976458f41cc135372b7255d5206c953", size = 381813, upload-time = "2025-11-04T18:29:54.394Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/15/429c72d64323503fd42cc4ca8398930ded8aa8b3470df8a86b3bbae7a35c/ormsgpack-1.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ed37f29772432048b58174e920a1d4c4cde0404a5d448d3d8bbcc95d86a6918", size = 112949, upload-time = "2025-11-04T18:29:55.371Z" },
+ { url = "https://files.pythonhosted.org/packages/55/b9/e72c451a40f8c57bfc229e0b8e536ecea7203c8f0a839676df2ffb605c62/ormsgpack-1.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:b03994bbec5d6d42e03d6604e327863f885bde67aa61e06107ce1fa5bdd3e71d", size = 106689, upload-time = "2025-11-04T18:29:56.262Z" },
+ { url = "https://files.pythonhosted.org/packages/13/16/13eab1a75da531b359105fdee90dda0b6bd1ca0a09880250cf91d8bdfdea/ormsgpack-1.12.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0f3981ba3cba80656012090337e548e597799e14b41e3d0b595ab5ab05a23d7f", size = 369620, upload-time = "2025-11-04T18:29:57.255Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/c1/cbcc38b7af4ce58d8893e56d3595c0c8dcd117093bf048f889cf351bdba0/ormsgpack-1.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:901f6f55184d6776dbd5183cbce14caf05bf7f467eef52faf9b094686980bf71", size = 195925, upload-time = "2025-11-04T18:29:58.34Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/59/4fa4dc0681490e12b75333440a1c0fd9741b0ebff272b1db4a29d35c2021/ormsgpack-1.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e13b15412571422b711b40f45e3fe6d993ea3314b5e97d1a853fe99226c5effc", size = 206594, upload-time = "2025-11-04T18:29:59.329Z" },
+ { url = "https://files.pythonhosted.org/packages/39/67/249770896bc32bb91b22c30256961f935d0915cbcf6e289a7fc961d9b14c/ormsgpack-1.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91fa8a452553a62e5fb3fbab471e7faf7b3bec3c87a2f355ebf3d7aab290fe4f", size = 208307, upload-time = "2025-11-04T18:30:00.377Z" },
+ { url = "https://files.pythonhosted.org/packages/07/0a/e041a248cd72f2f4c07e155913e0a3ede4c86cf21a40ae6cd79f135f2847/ormsgpack-1.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:74ec101f69624695eec4ce7c953192d97748254abe78fb01b591f06d529e1952", size = 377844, upload-time = "2025-11-04T18:30:01.389Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/71/6f7773e4ffda73a358ce4bba69b3e8bee9d40a7a06315e4c1cd7a3ea9d02/ormsgpack-1.12.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:9bbf7896580848326c1f9bd7531f264e561f98db7e08e15aa75963d83832c717", size = 471572, upload-time = "2025-11-04T18:30:02.486Z" },
+ { url = "https://files.pythonhosted.org/packages/65/29/af6769a4289c07acc71e7bda1d64fb31800563147d73142686e185e82348/ormsgpack-1.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7567917da613b8f8d591c1674e411fd3404bea41ef2b9a0e0a1e049c0f9406d7", size = 381842, upload-time = "2025-11-04T18:30:03.799Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/dd/0a86195ee7a1a96c088aefc8504385e881cf56f4563ed81bafe21cbf1fb0/ormsgpack-1.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:4e418256c5d8622b8bc92861936f7c6a0131355e7bcad88a42102ae8227f8a1c", size = 113008, upload-time = "2025-11-04T18:30:04.777Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/57/fafc79e32f3087f6f26f509d80b8167516326bfea38d30502627c01617e0/ormsgpack-1.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:433ace29aa02713554f714c62a4e4dcad0c9e32674ba4f66742c91a4c3b1b969", size = 106648, upload-time = "2025-11-04T18:30:05.708Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/cf/5d58d9b132128d2fe5d586355dde76af386554abef00d608f66b913bff1f/ormsgpack-1.12.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e57164be4ca34b64e210ec515059193280ac84df4d6f31a6fcbfb2fc8436de55", size = 369803, upload-time = "2025-11-04T18:30:06.728Z" },
+ { url = "https://files.pythonhosted.org/packages/67/42/968a2da361eaff2e4cbb17c82c7599787babf16684110ad70409646cc1e4/ormsgpack-1.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:904f96289deaa92fc6440b122edc27c5bdc28234edd63717f6d853d88c823a83", size = 195991, upload-time = "2025-11-04T18:30:07.713Z" },
+ { url = "https://files.pythonhosted.org/packages/03/f0/9696c6c6cf8ad35170f0be8d0ef3523cc258083535f6c8071cb8235ebb8b/ormsgpack-1.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b291d086e524a1062d57d1b7b5a8bcaaf29caebf0212fec12fd86240bd33633", size = 208316, upload-time = "2025-11-04T18:30:08.663Z" },
+]
+
[[package]]
name = "packaging"
version = "25.0"
@@ -3623,6 +3790,28 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
]
+[[package]]
+name = "uuid-utils"
+version = "0.12.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0b/0e/512fb221e4970c2f75ca9dae412d320b7d9ddc9f2b15e04ea8e44710396c/uuid_utils-0.12.0.tar.gz", hash = "sha256:252bd3d311b5d6b7f5dfce7a5857e27bb4458f222586bb439463231e5a9cbd64", size = 20889, upload-time = "2025-12-01T17:29:55.494Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8a/43/de5cd49a57b6293b911b6a9a62fc03e55db9f964da7d5882d9edbee1e9d2/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3b9b30707659292f207b98f294b0e081f6d77e1fbc760ba5b41331a39045f514", size = 603197, upload-time = "2025-12-01T17:29:30.104Z" },
+ { url = "https://files.pythonhosted.org/packages/02/fa/5fd1d8c9234e44f0c223910808cde0de43bb69f7df1349e49b1afa7f2baa/uuid_utils-0.12.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:add3d820c7ec14ed37317375bea30249699c5d08ff4ae4dbee9fc9bce3bfbf65", size = 305168, upload-time = "2025-12-01T17:29:31.384Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/c6/8633ac9942bf9dc97a897b5154e5dcffa58816ec4dd780b3b12b559ff05c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8fce83ecb3b16af29c7809669056c4b6e7cc912cab8c6d07361645de12dd79", size = 340580, upload-time = "2025-12-01T17:29:32.362Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/88/8a61307b04b4da1c576373003e6d857a04dade52ab035151d62cb84d5cb5/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec921769afcb905035d785582b0791d02304a7850fbd6ce924c1a8976380dfc6", size = 346771, upload-time = "2025-12-01T17:29:33.708Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/fb/aab2dcf94b991e62aa167457c7825b9b01055b884b888af926562864398c/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f3b060330f5899a92d5c723547dc6a95adef42433e9748f14c66859a7396664", size = 474781, upload-time = "2025-12-01T17:29:35.237Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/7a/dbd5e49c91d6c86dba57158bbfa0e559e1ddf377bb46dcfd58aea4f0d567/uuid_utils-0.12.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:908dfef7f0bfcf98d406e5dc570c25d2f2473e49b376de41792b6e96c1d5d291", size = 343685, upload-time = "2025-12-01T17:29:36.677Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/19/8c4b1d9f450159733b8be421a4e1fb03533709b80ed3546800102d085572/uuid_utils-0.12.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4c6a24148926bd0ca63e8a2dabf4cc9dc329a62325b3ad6578ecd60fbf926506", size = 366482, upload-time = "2025-12-01T17:29:37.979Z" },
+ { url = "https://files.pythonhosted.org/packages/82/43/c79a6e45687647f80a159c8ba34346f287b065452cc419d07d2212d38420/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:64a91e632669f059ef605f1771d28490b1d310c26198e46f754e8846dddf12f4", size = 523132, upload-time = "2025-12-01T17:29:39.293Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/a2/b2d75a621260a40c438aa88593827dfea596d18316520a99e839f7a5fb9d/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:93c082212470bb4603ca3975916c205a9d7ef1443c0acde8fbd1e0f5b36673c7", size = 614218, upload-time = "2025-12-01T17:29:40.315Z" },
+ { url = "https://files.pythonhosted.org/packages/13/6b/ba071101626edd5a6dabf8525c9a1537ff3d885dbc210540574a03901fef/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:431b1fb7283ba974811b22abd365f2726f8f821ab33f0f715be389640e18d039", size = 546241, upload-time = "2025-12-01T17:29:41.656Z" },
+ { url = "https://files.pythonhosted.org/packages/01/12/9a942b81c0923268e6d85bf98d8f0a61fcbcd5e432fef94fdf4ce2ef8748/uuid_utils-0.12.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2ffd7838c40149100299fa37cbd8bab5ee382372e8e65a148002a37d380df7c8", size = 511842, upload-time = "2025-12-01T17:29:43.107Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/a7/c326f5163dd48b79368b87d8a05f5da4668dd228a3f5ca9d79d5fee2fc40/uuid_utils-0.12.0-cp39-abi3-win32.whl", hash = "sha256:487f17c0fee6cbc1d8b90fe811874174a9b1b5683bf2251549e302906a50fed3", size = 179088, upload-time = "2025-12-01T17:29:44.492Z" },
+ { url = "https://files.pythonhosted.org/packages/38/92/41c8734dd97213ee1d5ae435cf4499705dc4f2751e3b957fd12376f61784/uuid_utils-0.12.0-cp39-abi3-win_amd64.whl", hash = "sha256:9598e7c9da40357ae8fffc5d6938b1a7017f09a1acbcc95e14af8c65d48c655a", size = 183003, upload-time = "2025-12-01T17:29:45.47Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/f9/52ab0359618987331a1f739af837d26168a4b16281c9c3ab46519940c628/uuid_utils-0.12.0-cp39-abi3-win_arm64.whl", hash = "sha256:c9bea7c5b2aa6f57937ebebeee4d4ef2baad10f86f1b97b58a3f6f34c14b4e84", size = 182975, upload-time = "2025-12-01T17:29:46.444Z" },
+]
+
[[package]]
name = "uvicorn"
version = "0.35.0"
@@ -3652,6 +3841,8 @@ dependencies = [
{ name = "edgartools" },
{ name = "fastapi" },
{ name = "func-timeout" },
+ { name = "langchain-openai" },
+ { name = "langgraph" },
{ name = "loguru" },
{ name = "markdown" },
{ name = "pydantic" },
@@ -3711,6 +3902,8 @@ requires-dist = [
{ name = "edgartools", specifier = ">=4.12.2" },
{ name = "fastapi", specifier = ">=0.104.0" },
{ name = "func-timeout", specifier = ">=4.3.5" },
+ { name = "langchain-openai", specifier = ">=1.1.1" },
+ { name = "langgraph", specifier = ">=1.0.4" },
{ name = "loguru", specifier = ">=0.7.3" },
{ name = "markdown", specifier = ">=3.9" },
{ name = "pydantic", specifier = ">=2.0.0" },
@@ -4056,3 +4249,60 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/36/9a/62a9ba3a919594605a07c34eee3068659bbd648e2fa0c4a86d876810b674/zope_interface-8.0.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:87e6b089002c43231fb9afec89268391bcc7a3b66e76e269ffde19a8112fb8d5", size = 264201, upload-time = "2025-09-25T06:26:27.797Z" },
{ url = "https://files.pythonhosted.org/packages/da/06/8fe88bd7edef60566d21ef5caca1034e10f6b87441ea85de4bbf9ea74768/zope_interface-8.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:64a43f5280aa770cbafd0307cb3d1ff430e2a1001774e8ceb40787abe4bb6658", size = 212273, upload-time = "2025-09-25T06:00:25.398Z" },
]
+
+[[package]]
+name = "zstandard"
+version = "0.25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fd/aa/3e0508d5a5dd96529cdc5a97011299056e14c6505b678fd58938792794b1/zstandard-0.25.0.tar.gz", hash = "sha256:7713e1179d162cf5c7906da876ec2ccb9c3a9dcbdffef0cc7f70c3667a205f0b", size = 711513, upload-time = "2025-09-14T22:15:54.002Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/82/fc/f26eb6ef91ae723a03e16eddb198abcfce2bc5a42e224d44cc8b6765e57e/zstandard-0.25.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7b3c3a3ab9daa3eed242d6ecceead93aebbb8f5f84318d82cee643e019c4b73b", size = 795738, upload-time = "2025-09-14T22:16:56.237Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/1c/d920d64b22f8dd028a8b90e2d756e431a5d86194caa78e3819c7bf53b4b3/zstandard-0.25.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:913cbd31a400febff93b564a23e17c3ed2d56c064006f54efec210d586171c00", size = 640436, upload-time = "2025-09-14T22:16:57.774Z" },
+ { url = "https://files.pythonhosted.org/packages/53/6c/288c3f0bd9fcfe9ca41e2c2fbfd17b2097f6af57b62a81161941f09afa76/zstandard-0.25.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:011d388c76b11a0c165374ce660ce2c8efa8e5d87f34996aa80f9c0816698b64", size = 5343019, upload-time = "2025-09-14T22:16:59.302Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/15/efef5a2f204a64bdb5571e6161d49f7ef0fffdbca953a615efbec045f60f/zstandard-0.25.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6dffecc361d079bb48d7caef5d673c88c8988d3d33fb74ab95b7ee6da42652ea", size = 5063012, upload-time = "2025-09-14T22:17:01.156Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/37/a6ce629ffdb43959e92e87ebdaeebb5ac81c944b6a75c9c47e300f85abdf/zstandard-0.25.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7149623bba7fdf7e7f24312953bcf73cae103db8cae49f8154dd1eadc8a29ecb", size = 5394148, upload-time = "2025-09-14T22:17:03.091Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/79/2bf870b3abeb5c070fe2d670a5a8d1057a8270f125ef7676d29ea900f496/zstandard-0.25.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:6a573a35693e03cf1d67799fd01b50ff578515a8aeadd4595d2a7fa9f3ec002a", size = 5451652, upload-time = "2025-09-14T22:17:04.979Z" },
+ { url = "https://files.pythonhosted.org/packages/53/60/7be26e610767316c028a2cbedb9a3beabdbe33e2182c373f71a1c0b88f36/zstandard-0.25.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5a56ba0db2d244117ed744dfa8f6f5b366e14148e00de44723413b2f3938a902", size = 5546993, upload-time = "2025-09-14T22:17:06.781Z" },
+ { url = "https://files.pythonhosted.org/packages/85/c7/3483ad9ff0662623f3648479b0380d2de5510abf00990468c286c6b04017/zstandard-0.25.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:10ef2a79ab8e2974e2075fb984e5b9806c64134810fac21576f0668e7ea19f8f", size = 5046806, upload-time = "2025-09-14T22:17:08.415Z" },
+ { url = "https://files.pythonhosted.org/packages/08/b3/206883dd25b8d1591a1caa44b54c2aad84badccf2f1de9e2d60a446f9a25/zstandard-0.25.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aaf21ba8fb76d102b696781bddaa0954b782536446083ae3fdaa6f16b25a1c4b", size = 5576659, upload-time = "2025-09-14T22:17:10.164Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/31/76c0779101453e6c117b0ff22565865c54f48f8bd807df2b00c2c404b8e0/zstandard-0.25.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1869da9571d5e94a85a5e8d57e4e8807b175c9e4a6294e3b66fa4efb074d90f6", size = 4953933, upload-time = "2025-09-14T22:17:11.857Z" },
+ { url = "https://files.pythonhosted.org/packages/18/e1/97680c664a1bf9a247a280a053d98e251424af51f1b196c6d52f117c9720/zstandard-0.25.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:809c5bcb2c67cd0ed81e9229d227d4ca28f82d0f778fc5fea624a9def3963f91", size = 5268008, upload-time = "2025-09-14T22:17:13.627Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/73/316e4010de585ac798e154e88fd81bb16afc5c5cb1a72eeb16dd37e8024a/zstandard-0.25.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f27662e4f7dbf9f9c12391cb37b4c4c3cb90ffbd3b1fb9284dadbbb8935fa708", size = 5433517, upload-time = "2025-09-14T22:17:16.103Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/60/dd0f8cfa8129c5a0ce3ea6b7f70be5b33d2618013a161e1ff26c2b39787c/zstandard-0.25.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99c0c846e6e61718715a3c9437ccc625de26593fea60189567f0118dc9db7512", size = 5814292, upload-time = "2025-09-14T22:17:17.827Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/5f/75aafd4b9d11b5407b641b8e41a57864097663699f23e9ad4dbb91dc6bfe/zstandard-0.25.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:474d2596a2dbc241a556e965fb76002c1ce655445e4e3bf38e5477d413165ffa", size = 5360237, upload-time = "2025-09-14T22:17:19.954Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/8d/0309daffea4fcac7981021dbf21cdb2e3427a9e76bafbcdbdf5392ff99a4/zstandard-0.25.0-cp312-cp312-win32.whl", hash = "sha256:23ebc8f17a03133b4426bcc04aabd68f8236eb78c3760f12783385171b0fd8bd", size = 436922, upload-time = "2025-09-14T22:17:24.398Z" },
+ { url = "https://files.pythonhosted.org/packages/79/3b/fa54d9015f945330510cb5d0b0501e8253c127cca7ebe8ba46a965df18c5/zstandard-0.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffef5a74088f1e09947aecf91011136665152e0b4b359c42be3373897fb39b01", size = 506276, upload-time = "2025-09-14T22:17:21.429Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/6b/8b51697e5319b1f9ac71087b0af9a40d8a6288ff8025c36486e0c12abcc4/zstandard-0.25.0-cp312-cp312-win_arm64.whl", hash = "sha256:181eb40e0b6a29b3cd2849f825e0fa34397f649170673d385f3598ae17cca2e9", size = 462679, upload-time = "2025-09-14T22:17:23.147Z" },
+ { url = "https://files.pythonhosted.org/packages/35/0b/8df9c4ad06af91d39e94fa96cc010a24ac4ef1378d3efab9223cc8593d40/zstandard-0.25.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec996f12524f88e151c339688c3897194821d7f03081ab35d31d1e12ec975e94", size = 795735, upload-time = "2025-09-14T22:17:26.042Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/06/9ae96a3e5dcfd119377ba33d4c42a7d89da1efabd5cb3e366b156c45ff4d/zstandard-0.25.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a1a4ae2dec3993a32247995bdfe367fc3266da832d82f8438c8570f989753de1", size = 640440, upload-time = "2025-09-14T22:17:27.366Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/14/933d27204c2bd404229c69f445862454dcc101cd69ef8c6068f15aaec12c/zstandard-0.25.0-cp313-cp313-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:e96594a5537722fdfb79951672a2a63aec5ebfb823e7560586f7484819f2a08f", size = 5343070, upload-time = "2025-09-14T22:17:28.896Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/db/ddb11011826ed7db9d0e485d13df79b58586bfdec56e5c84a928a9a78c1c/zstandard-0.25.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bfc4e20784722098822e3eee42b8e576b379ed72cca4a7cb856ae733e62192ea", size = 5063001, upload-time = "2025-09-14T22:17:31.044Z" },
+ { url = "https://files.pythonhosted.org/packages/db/00/87466ea3f99599d02a5238498b87bf84a6348290c19571051839ca943777/zstandard-0.25.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:457ed498fc58cdc12fc48f7950e02740d4f7ae9493dd4ab2168a47c93c31298e", size = 5394120, upload-time = "2025-09-14T22:17:32.711Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/95/fc5531d9c618a679a20ff6c29e2b3ef1d1f4ad66c5e161ae6ff847d102a9/zstandard-0.25.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:fd7a5004eb1980d3cefe26b2685bcb0b17989901a70a1040d1ac86f1d898c551", size = 5451230, upload-time = "2025-09-14T22:17:34.41Z" },
+ { url = "https://files.pythonhosted.org/packages/63/4b/e3678b4e776db00f9f7b2fe58e547e8928ef32727d7a1ff01dea010f3f13/zstandard-0.25.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8e735494da3db08694d26480f1493ad2cf86e99bdd53e8e9771b2752a5c0246a", size = 5547173, upload-time = "2025-09-14T22:17:36.084Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/d5/ba05ed95c6b8ec30bd468dfeab20589f2cf709b5c940483e31d991f2ca58/zstandard-0.25.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3a39c94ad7866160a4a46d772e43311a743c316942037671beb264e395bdd611", size = 5046736, upload-time = "2025-09-14T22:17:37.891Z" },
+ { url = "https://files.pythonhosted.org/packages/50/d5/870aa06b3a76c73eced65c044b92286a3c4e00554005ff51962deef28e28/zstandard-0.25.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:172de1f06947577d3a3005416977cce6168f2261284c02080e7ad0185faeced3", size = 5576368, upload-time = "2025-09-14T22:17:40.206Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/35/398dc2ffc89d304d59bc12f0fdd931b4ce455bddf7038a0a67733a25f550/zstandard-0.25.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3c83b0188c852a47cd13ef3bf9209fb0a77fa5374958b8c53aaa699398c6bd7b", size = 4954022, upload-time = "2025-09-14T22:17:41.879Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/5c/36ba1e5507d56d2213202ec2b05e8541734af5f2ce378c5d1ceaf4d88dc4/zstandard-0.25.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1673b7199bbe763365b81a4f3252b8e80f44c9e323fc42940dc8843bfeaf9851", size = 5267889, upload-time = "2025-09-14T22:17:43.577Z" },
+ { url = "https://files.pythonhosted.org/packages/70/e8/2ec6b6fb7358b2ec0113ae202647ca7c0e9d15b61c005ae5225ad0995df5/zstandard-0.25.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0be7622c37c183406f3dbf0cba104118eb16a4ea7359eeb5752f0794882fc250", size = 5433952, upload-time = "2025-09-14T22:17:45.271Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/01/b5f4d4dbc59ef193e870495c6f1275f5b2928e01ff5a81fecb22a06e22fb/zstandard-0.25.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5f5e4c2a23ca271c218ac025bd7d635597048b366d6f31f420aaeb715239fc98", size = 5814054, upload-time = "2025-09-14T22:17:47.08Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/e5/fbd822d5c6f427cf158316d012c5a12f233473c2f9c5fe5ab1ae5d21f3d8/zstandard-0.25.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f187a0bb61b35119d1926aee039524d1f93aaf38a9916b8c4b78ac8514a0aaf", size = 5360113, upload-time = "2025-09-14T22:17:48.893Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/e0/69a553d2047f9a2c7347caa225bb3a63b6d7704ad74610cb7823baa08ed7/zstandard-0.25.0-cp313-cp313-win32.whl", hash = "sha256:7030defa83eef3e51ff26f0b7bfb229f0204b66fe18e04359ce3474ac33cbc09", size = 436936, upload-time = "2025-09-14T22:17:52.658Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/82/b9c06c870f3bd8767c201f1edbdf9e8dc34be5b0fbc5682c4f80fe948475/zstandard-0.25.0-cp313-cp313-win_amd64.whl", hash = "sha256:1f830a0dac88719af0ae43b8b2d6aef487d437036468ef3c2ea59c51f9d55fd5", size = 506232, upload-time = "2025-09-14T22:17:50.402Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/57/60c3c01243bb81d381c9916e2a6d9e149ab8627c0c7d7abb2d73384b3c0c/zstandard-0.25.0-cp313-cp313-win_arm64.whl", hash = "sha256:85304a43f4d513f5464ceb938aa02c1e78c2943b29f44a750b48b25ac999a049", size = 462671, upload-time = "2025-09-14T22:17:51.533Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/5c/f8923b595b55fe49e30612987ad8bf053aef555c14f05bb659dd5dbe3e8a/zstandard-0.25.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e29f0cf06974c899b2c188ef7f783607dbef36da4c242eb6c82dcd8b512855e3", size = 795887, upload-time = "2025-09-14T22:17:54.198Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/09/d0a2a14fc3439c5f874042dca72a79c70a532090b7ba0003be73fee37ae2/zstandard-0.25.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:05df5136bc5a011f33cd25bc9f506e7426c0c9b3f9954f056831ce68f3b6689f", size = 640658, upload-time = "2025-09-14T22:17:55.423Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/7c/8b6b71b1ddd517f68ffb55e10834388d4f793c49c6b83effaaa05785b0b4/zstandard-0.25.0-cp314-cp314-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:f604efd28f239cc21b3adb53eb061e2a205dc164be408e553b41ba2ffe0ca15c", size = 5379849, upload-time = "2025-09-14T22:17:57.372Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/86/a48e56320d0a17189ab7a42645387334fba2200e904ee47fc5a26c1fd8ca/zstandard-0.25.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223415140608d0f0da010499eaa8ccdb9af210a543fac54bce15babbcfc78439", size = 5058095, upload-time = "2025-09-14T22:17:59.498Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/ad/eb659984ee2c0a779f9d06dbfe45e2dc39d99ff40a319895df2d3d9a48e5/zstandard-0.25.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e54296a283f3ab5a26fc9b8b5d4978ea0532f37b231644f367aa588930aa043", size = 5551751, upload-time = "2025-09-14T22:18:01.618Z" },
+ { url = "https://files.pythonhosted.org/packages/61/b3/b637faea43677eb7bd42ab204dfb7053bd5c4582bfe6b1baefa80ac0c47b/zstandard-0.25.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ca54090275939dc8ec5dea2d2afb400e0f83444b2fc24e07df7fdef677110859", size = 6364818, upload-time = "2025-09-14T22:18:03.769Z" },
+ { url = "https://files.pythonhosted.org/packages/31/dc/cc50210e11e465c975462439a492516a73300ab8caa8f5e0902544fd748b/zstandard-0.25.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e09bb6252b6476d8d56100e8147b803befa9a12cea144bbe629dd508800d1ad0", size = 5560402, upload-time = "2025-09-14T22:18:05.954Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/ae/56523ae9c142f0c08efd5e868a6da613ae76614eca1305259c3bf6a0ed43/zstandard-0.25.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a9ec8c642d1ec73287ae3e726792dd86c96f5681eb8df274a757bf62b750eae7", size = 4955108, upload-time = "2025-09-14T22:18:07.68Z" },
+ { url = "https://files.pythonhosted.org/packages/98/cf/c899f2d6df0840d5e384cf4c4121458c72802e8bda19691f3b16619f51e9/zstandard-0.25.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a4089a10e598eae6393756b036e0f419e8c1d60f44a831520f9af41c14216cf2", size = 5269248, upload-time = "2025-09-14T22:18:09.753Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/c0/59e912a531d91e1c192d3085fc0f6fb2852753c301a812d856d857ea03c6/zstandard-0.25.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f67e8f1a324a900e75b5e28ffb152bcac9fbed1cc7b43f99cd90f395c4375344", size = 5430330, upload-time = "2025-09-14T22:18:11.966Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/1d/7e31db1240de2df22a58e2ea9a93fc6e38cc29353e660c0272b6735d6669/zstandard-0.25.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:9654dbc012d8b06fc3d19cc825af3f7bf8ae242226df5f83936cb39f5fdc846c", size = 5811123, upload-time = "2025-09-14T22:18:13.907Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/49/fac46df5ad353d50535e118d6983069df68ca5908d4d65b8c466150a4ff1/zstandard-0.25.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4203ce3b31aec23012d3a4cf4a2ed64d12fea5269c49aed5e4c3611b938e4088", size = 5359591, upload-time = "2025-09-14T22:18:16.465Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/38/f249a2050ad1eea0bb364046153942e34abba95dd5520af199aed86fbb49/zstandard-0.25.0-cp314-cp314-win32.whl", hash = "sha256:da469dc041701583e34de852d8634703550348d5822e66a0c827d39b05365b12", size = 444513, upload-time = "2025-09-14T22:18:20.61Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/43/241f9615bcf8ba8903b3f0432da069e857fc4fd1783bd26183db53c4804b/zstandard-0.25.0-cp314-cp314-win_amd64.whl", hash = "sha256:c19bcdd826e95671065f8692b5a4aa95c52dc7a02a4c5a0cac46deb879a017a2", size = 516118, upload-time = "2025-09-14T22:18:17.849Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/ef/da163ce2450ed4febf6467d77ccb4cd52c4c30ab45624bad26ca0a27260c/zstandard-0.25.0-cp314-cp314-win_arm64.whl", hash = "sha256:d7541afd73985c630bafcd6338d2518ae96060075f9463d7dc14cfb33514383d", size = 476940, upload-time = "2025-09-14T22:18:19.088Z" },
+]
diff --git a/python/valuecell/agents/react_agent/__init__.py b/python/valuecell/agents/react_agent/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/valuecell/agents/react_agent/context.py b/python/valuecell/agents/react_agent/context.py
new file mode 100644
index 000000000..f8d6c6da6
--- /dev/null
+++ b/python/valuecell/agents/react_agent/context.py
@@ -0,0 +1,56 @@
+"""Task execution context for tool runtime."""
+
+from typing import Optional
+
+from langchain_core.callbacks import adispatch_custom_event
+from langchain_core.runnables import RunnableConfig
+
+
+class TaskContext:
+ """Context object passed to tools, encapsulating task metadata and event dispatch.
+
+ This context binds a task_id with the LangGraph config, allowing tools to
+ emit progress events and artifacts without polluting their parameter schemas.
+
+ Example:
+ ```python
+ async def my_tool(symbol: str, context: Optional[TaskContext] = None) -> str:
+ if context:
+ await context.emit_progress("Fetching data...")
+ # ... tool logic ...
+ return result
+ ```
+ """
+
+ def __init__(self, task_id: str, config: RunnableConfig):
+ """Initialize task context.
+
+ Args:
+ task_id: Unique identifier for the current task
+ config: LangGraph RunnableConfig for event dispatch
+ """
+ self.task_id = task_id
+ self._config = config
+
+ async def emit_progress(
+ self,
+ msg: str,
+ step: Optional[str] = None,
+ ) -> None:
+ """Emit a progress event linked to this specific task.
+
+ Args:
+ msg: Human-readable progress message
+ step: Optional step identifier (e.g., "fetching_income")
+ """
+ if not msg.endswith("\n"):
+ msg += "\n"
+
+ payload = {
+ "type": "progress",
+ "task_id": self.task_id,
+ "msg": msg,
+ "step": step,
+ }
+ await adispatch_custom_event("tool_event", payload, config=self._config)
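+
+
+# Consumer-side sketch (illustrative): events dispatched above surface as
+# `on_custom_event` entries named "tool_event" when streaming any compiled
+# graph with astream_events(version="v2"); `app`, `inputs`, and `config` here
+# stand in for the caller's own objects:
+#
+#   async for ev in app.astream_events(inputs, config=config, version="v2"):
+#       if ev.get("event") == "on_custom_event" and ev.get("name") == "tool_event":
+#           payload = ev["data"]  # {"type": "progress", "task_id": ..., "msg": ...}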
diff --git a/python/valuecell/agents/react_agent/demo/__init__.py b/python/valuecell/agents/react_agent/demo/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/valuecell/agents/react_agent/demo/index.html b/python/valuecell/agents/react_agent/demo/index.html
new file mode 100644
index 000000000..31db67883
--- /dev/null
+++ b/python/valuecell/agents/react_agent/demo/index.html
@@ -0,0 +1,384 @@
+<!-- Single-page chat UI for the React Agent demo (full markup, styles, and
+     SSE client script elided). Recoverable content:
+     - Page title: "React Agent - Financial Assistant"
+     - Header: "🤖 Financial Agent", tagline "Powered by LangGraph & Agno",
+       and a "Session: ..." indicator
+     - Welcome message: "👋 Hello! I'm your AI financial analyst." /
+       "I can help you with deeper market research. Try asking:" with example
+       prompts "Analyze AAPL's recent earnings", "Compare Microsoft and Google
+       cloud growth", "Why is Tesla stock volatile?"
+     - A "Thinking & Researching..." loading indicator
+     - A "SYSTEM LOGS" panel with an "IDLE" status badge and a
+       "Waiting for events..." placeholder -->
+
diff --git a/python/valuecell/agents/react_agent/demo/server.py b/python/valuecell/agents/react_agent/demo/server.py
new file mode 100644
index 000000000..0725fa90b
--- /dev/null
+++ b/python/valuecell/agents/react_agent/demo/server.py
@@ -0,0 +1,180 @@
+"""
+FastAPI server for React Agent with SSE (Server-Sent Events) streaming.
+Handles Pydantic serialization, router filtering, and per-node observability.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+import uvicorn
+from fastapi import FastAPI
+from fastapi.encoders import jsonable_encoder
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
+from langchain_core.messages import AIMessage, HumanMessage
+from loguru import logger
+from pydantic import BaseModel
+
+from valuecell.agents.react_agent.graph import get_app
+
+app = FastAPI(title="React Agent API")
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+
+class ChatRequest(BaseModel):
+ message: str
+ thread_id: str
+
+
+def format_sse(event_type: str, data: Any) -> str:
+ """Format SSE message with proper JSON serialization for Pydantic objects."""
+ # jsonable_encoder converts Pydantic models to dicts automatically
+ clean_data = jsonable_encoder(data)
+ return f"data: {json.dumps({'type': event_type, 'data': clean_data})}\n\n"
+
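+# Example frame on the wire, as produced by format_sse (illustrative):
+#
+#   data: {"type": "content_token", "data": {"delta": "Hel"}}\n\n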
+
+async def event_stream_generator(user_input: str, thread_id: str):
+ """
+    Convert the LangGraph v2 event stream into the frontend's UI event protocol.
+ """
+ try:
+ graph = get_app()
+ inputs = {"messages": [HumanMessage(content=user_input)]}
+ config = {"configurable": {"thread_id": thread_id}}
+
+ logger.info(f"Stream start: {thread_id}")
+
+        # Helper: routers return plain strings like "wait" or "plan";
+        # real nodes return dicts or Messages.
+        def is_real_node_output(d):
+            output = d.get("output")
+            return not isinstance(output, str)
+
+        async for event in graph.astream_events(inputs, config=config, version="v2"):
+            kind = event.get("event", "")
+            node = event.get("metadata", {}).get("langgraph_node", "")
+            data = event.get("data") or {}
+
+ # =================================================================
+ # 1. OBSERVABILITY EVENTS (Planner, Executor, Critic)
+ # =================================================================
+
+ # PLANNER: Emit the task list
+ if kind == "on_chain_end" and node == "planner":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ # Ensure we have a plan
+ if isinstance(output, dict) and "plan" in output:
+ yield format_sse(
+ "planner_update",
+ {
+ "plan": output.get("plan"),
+ "reasoning": output.get("strategy_update"),
+ },
+ )
+
+ # EXECUTOR: Emit specific task results (text/data)
+ elif kind == "on_chain_end" and node == "executor":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ if isinstance(output, dict) and "completed_tasks" in output:
+ for task_id, res in output["completed_tasks"].items():
+ # res structure: {'task_id': 't1', 'ok': True, 'result': '...'}
+ yield format_sse(
+ "task_result",
+ {
+ "task_id": task_id,
+ "status": "success" if res.get("ok") else "error",
+ "result": res.get(
+ "result"
+ ), # This is the markdown text
+ },
+ )
+
+ # CRITIC: Emit approval/rejection logic
+ elif kind == "on_chain_end" and node == "critic":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ if isinstance(output, dict):
+ summary = output.get("_critic_summary")
+ if summary:
+ yield format_sse("critic_decision", summary)
+
+ # AGNO/TOOL LOGS: Intermediate progress
+ elif kind == "on_custom_event" and event.get("name") == "agno_event":
+ yield format_sse(
+ "tool_progress", {"node": node or "executor", "details": data}
+ )
+
+ # =================================================================
+ # 2. CHAT CONTENT EVENTS (Inquirer, Summarizer)
+ # =================================================================
+
+ # STREAMING CONTENT (Summarizer)
+ if kind == "on_chat_model_stream" and node == "summarizer":
+ chunk = data.get("chunk")
+ text = chunk.content if chunk else None
+ if text:
+ yield format_sse("content_token", {"delta": text})
+
+ # STATIC CONTENT (Inquirer / Fallback)
+ # Inquirer returns a full AIMessage at the end, not streamed
+ elif kind == "on_chain_end" and node == "inquirer":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ msgs = output.get("messages", [])
+ if msgs and isinstance(msgs, list):
+ last_msg = msgs[-1]
+ # Verify it's an AI message meant for the user
+ if isinstance(last_msg, AIMessage) and last_msg.content:
+ # Only emit if we haven't streamed this content already
+ # (Inquirer doesn't stream, so this is safe)
+ yield format_sse(
+ "content_token", {"delta": last_msg.content}
+ )
+
+ # =================================================================
+ # 3. UI STATE EVENTS
+ # =================================================================
+
+ elif kind == "on_chain_start" and node:
+ yield format_sse("step_change", {"step": node, "status": "started"})
+
+ elif kind == "on_chain_end" and node:
+ # Filter out routers for UI cleanliness
+ if is_real_node_output(data):
+ yield format_sse(
+ "step_change", {"step": node, "status": "completed"}
+ )
+
+ # End of stream
+ yield format_sse("done", {})
+ logger.info(f"Stream done: {thread_id}")
+
+ except Exception as exc:
+ logger.exception(f"Stream error: {exc}")
+ yield format_sse("error", {"message": str(exc)})
+
+
+@app.post("/chat/stream")
+async def chat_stream(request: ChatRequest):
+ return StreamingResponse(
+ event_stream_generator(request.message, request.thread_id),
+ media_type="text/event-stream",
+ headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"},
+ )
+
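+# Illustrative client sketch (assumes httpx is installed; endpoint and port as
+# defined in this module):
+#
+#   import asyncio, json
+#   import httpx
+#
+#   async def main():
+#       async with httpx.AsyncClient(timeout=None) as client:
+#           async with client.stream(
+#               "POST",
+#               "http://localhost:8009/chat/stream",
+#               json={"message": "Analyze AAPL", "thread_id": "demo"},
+#           ) as resp:
+#               async for line in resp.aiter_lines():
+#                   if line.startswith("data: "):
+#                       print(json.loads(line[len("data: "):]))
+#
+#   asyncio.run(main())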
+
+if __name__ == "__main__":
+ uvicorn.run("server:app", host="0.0.0.0", port=8009)
diff --git a/python/valuecell/agents/react_agent/graph.py b/python/valuecell/agents/react_agent/graph.py
new file mode 100644
index 000000000..72cad05ae
--- /dev/null
+++ b/python/valuecell/agents/react_agent/graph.py
@@ -0,0 +1,143 @@
+from __future__ import annotations
+
+import uuid
+from typing import Any
+
+from langchain_core.runnables import RunnableConfig
+
+from .nodes.critic import critic_node
+from .nodes.executor import executor_node
+from .nodes.inquirer import inquirer_node
+from .nodes.planner import planner_node
+from .nodes.summarizer import summarizer_node
+from .state import AgentState
+
+
+def _route_after_planner(state: AgentState):
+ """Route after planner based on is_final flag.
+
+ - If is_final=True: Route to critic for verification.
+ - If plan has tasks: Route to executor via Send.
+ - Otherwise: Route to critic as safety fallback.
+ """
+ try:
+ from langgraph.types import Send # type: ignore
+ except Exception as exc: # pragma: no cover
+ raise RuntimeError(
+ "LangGraph is required for the orchestrator. Install 'langgraph'."
+ ) from exc
+
+ is_final = state.get("is_final", False)
+ plan = state.get("plan") or []
+
+ # If planner claims done, verify with critic
+ if is_final:
+ return "critic"
+
+ # If planner produced tasks, execute them in parallel
+ if plan:
+ return [Send("executor", {"task": t}) for t in plan]
+
+ # Safety fallback: no tasks and not final -> go to critic
+ return "critic"
+
+
+async def _executor_entry(state: AgentState, config: RunnableConfig) -> dict[str, Any]:
+ """Entry adapter for executor: expects a `task` injected via Send().
+
+ Args:
+ state: Agent state containing task data
+ config: RunnableConfig injected by LangGraph
+ """
+ task = state.get("task") or {}
+ return await executor_node(state, task, config)
+
+
+def build_app() -> Any:
+ """Build and compile the LangGraph StateGraph with memory checkpointer."""
+ # Local imports to keep module import safe if langgraph isn't installed yet
+ try:
+ from langgraph.checkpoint.memory import MemorySaver # type: ignore
+ from langgraph.graph import END, START, StateGraph # type: ignore
+ except Exception as exc: # pragma: no cover - import-time guard
+ raise RuntimeError(
+ "LangGraph is required for the orchestrator. Install 'langgraph'."
+ ) from exc
+
+ graph = StateGraph(AgentState)
+
+ graph.add_node("inquirer", inquirer_node)
+ graph.add_node("planner", planner_node)
+ graph.add_node("executor", _executor_entry)
+ graph.add_node("critic", critic_node)
+ graph.add_node("summarizer", summarizer_node)
+
+ graph.add_edge(START, "inquirer")
+
+ def _route_after_inquirer(st: AgentState) -> str:
+        # The inquirer writes `current_intent` (a natural-language string).
+        # Route to the planner when an intent is present; otherwise wait/end.
+ return "plan" if st.get("current_intent") else "wait"
+
+ graph.add_conditional_edges(
+ "inquirer", _route_after_inquirer, {"plan": "planner", "wait": END}
+ )
+
+ # After planning, route based on is_final and plan content
+ graph.add_conditional_edges("planner", _route_after_planner, {"critic": "critic"})
+
+ # After executor completion, go back to planner for next iteration
+ graph.add_edge("executor", "planner")
+
+ def _route_after_critic(st: AgentState) -> str:
+ na = st.get("next_action")
+ val = getattr(na, "value", na)
+ v = str(val).lower() if val is not None else "exit"
+ if v == "replan":
+ # Clear is_final flag to allow fresh planning cycle
+ st["is_final"] = False
+ return "replan"
+ # Critic approved: route to summarizer for final report
+ return "summarize"
+
+ graph.add_conditional_edges(
+ "critic", _route_after_critic, {"replan": "planner", "summarize": "summarizer"}
+ )
+
+ # Summarizer generates final report, then END
+ graph.add_edge("summarizer", END)
+
+ memory = MemorySaver()
+ app = graph.compile(checkpointer=memory)
+ return app
+
+
+# Lazy singleton accessor to avoid import-time dependency failures
+_APP_SINGLETON: Any | None = None
+
+
+def get_app() -> Any:
+ global _APP_SINGLETON
+ if _APP_SINGLETON is None:
+ _APP_SINGLETON = build_app()
+ return _APP_SINGLETON
+
+
+# Backwards-compat shim: prefer get_app(). This module-level `app` stays None;
+# it exists only so older `from .graph import app` imports do not fail.
+app: Any | None = None
+
+
+async def astream_events(initial_state: dict[str, Any], config: dict | None = None):
+ """Stream LangGraph events (v2) from the compiled app.
+
+ Usage: async for ev in astream_events(state): ...
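+
+    A fuller sketch (hypothetical driver; assumes the caller seeds `messages`
+    with a LangChain HumanMessage and filters for custom events):
+
+        from langchain_core.messages import HumanMessage
+
+        state = {"messages": [HumanMessage(content="Analyze AAPL")]}
+        async for ev in astream_events(state):
+            if ev.get("event") == "on_custom_event":
+                print(ev["data"])  # progress / task_done payloads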
+ """
+ application = get_app()
+    # Ensure the checkpointer receives its required keys; LangGraph expects
+    # them nested under the "configurable" mapping of the RunnableConfig.
+    cfg: dict = dict(config or {})
+    configurable: dict = dict(cfg.get("configurable") or {})
+    configurable.setdefault("thread_id", "main")
+    configurable.setdefault("checkpoint_ns", "react_agent")
+    configurable.setdefault("checkpoint_id", str(uuid.uuid4()))
+    cfg["configurable"] = configurable
+
+ async for ev in application.astream_events(initial_state, config=cfg, version="v2"):
+ yield ev
diff --git a/python/valuecell/agents/react_agent/models.py b/python/valuecell/agents/react_agent/models.py
new file mode 100644
index 000000000..c0a9a300a
--- /dev/null
+++ b/python/valuecell/agents/react_agent/models.py
@@ -0,0 +1,99 @@
+from __future__ import annotations
+
+from typing import Any, Dict, Literal, Optional
+
+from pydantic import BaseModel, Field, field_validator
+
+
+class Task(BaseModel):
+ id: str
+ tool_name: str
+ tool_args: dict[str, Any] = Field(default_factory=dict)
+ description: str = Field(
+ default="",
+ description="Optional human-friendly task description (from planner).",
+ )
+
+
+class FinancialIntent(BaseModel):
+ asset_symbols: Optional[list[str]] = None
+
+    @field_validator("asset_symbols", mode="before")
+    @classmethod
+    def _coerce_asset_symbols(cls, v):
+ """Allow a single string symbol to be provided and coerce to list[str].
+
+ Examples:
+ - "AAPL" -> ["AAPL"]
+ - ["AAPL", "MSFT"] -> ["AAPL", "MSFT"]
+ - None -> None
+ """
+ if v is None:
+ return None
+ # If a single string provided, wrap it
+ if isinstance(v, str):
+ return [v]
+ # If tuple, convert to list
+ if isinstance(v, tuple):
+ return list(v)
+ # Otherwise assume it's already an iterable/list-like
+ return v
+
+
+class ExecutorResult(BaseModel):
+ task_id: str
+ ok: bool = True
+ result: Any | None = None
+ tool_name: str | None = None
+ description: str = Field(
+ default="",
+ description="Human-friendly description of the task that produced this result",
+ )
+ error: Optional[str] = None
+ error_code: Optional[str] = None # e.g., ERR_NETWORK, ERR_INPUT
+
+
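+# Scalar value types a planner may emit as tool arguments (kept JSON-friendly).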
+ARG_VAL_TYPES = str | int | float | bool
+
+
+class PlannedTask(BaseModel):
+ tool_id: str = Field(description="The EXACT tool_id from the available tool list")
+ tool_args: Dict[str, ARG_VAL_TYPES | list[ARG_VAL_TYPES]] = Field(
+ default_factory=dict,
+ description="The arguments to pass to the tool. "
+ "MUST strictly match the 'Arguments' list in the tool definition. "
+ "DO NOT leave empty if the tool requires parameters. "
+ "Example: {'symbol': 'AAPL', 'period': '1y'}",
+ )
+ description: str = Field(description="Short description for logs")
+
+
+class ExecutionPlan(BaseModel):
+ """Output from the Planner for iterative batch planning."""
+
+ tasks: list[PlannedTask] = Field(
+ description="List of tasks to execute concurrently in this batch."
+ )
+ strategy_update: str = Field(
+ description="Brief reasoning about what has been done and what is left."
+ )
+ is_final: bool = Field(
+ default=False,
+ description="Set to True ONLY if the user's goal is fully satisfied.",
+ )
+
+
+class InquirerDecision(BaseModel):
+ """Simplified decision model: Natural language intent output."""
+
+ current_intent: str | None = Field(
+ default=None,
+ description="A single, comprehensive natural language sentence describing the user's immediate goal. "
+ "Resolve all context and pronouns from conversation history. "
+ "Example: 'Compare Apple and Tesla performance'.",
+ )
+ # Only PLAN and CHAT statuses; RESET removed to simplify flow
+ status: Literal["PLAN", "CHAT"] = Field(
+ description="PLAN: Need to execute tasks. CHAT: Casual conversation/greeting."
+ )
+ reasoning: str = Field(description="Brief thought process.")
+    response_to_user: str | None = Field(
+        default=None, description="Direct response for CHAT status."
+    )
diff --git a/python/valuecell/agents/react_agent/nodes/critic.py b/python/valuecell/agents/react_agent/nodes/critic.py
new file mode 100644
index 000000000..0d4f4224d
--- /dev/null
+++ b/python/valuecell/agents/react_agent/nodes/critic.py
@@ -0,0 +1,109 @@
+from __future__ import annotations
+
+from enum import Enum
+from typing import Any
+
+from agno.agent import Agent
+from agno.models.openrouter import OpenRouter
+from loguru import logger
+from pydantic import BaseModel, Field
+
+
+class NextAction(str, Enum):
+ EXIT = "exit"
+ REPLAN = "replan"
+
+
+class CriticDecision(BaseModel):
+ """Gatekeeper decision for iterative batch planning."""
+
+ approved: bool = Field(
+ description="True if goal is fully satisfied, False otherwise"
+ )
+ reason: str = Field(description="Short rationale for the decision")
+ feedback: str | None = Field(
+ default=None,
+ description="Specific feedback for Planner if rejected (what is missing)",
+ )
+
+
+async def critic_node(state: dict[str, Any]) -> dict[str, Any]:
+ """Gatekeeper: Verify if user's goal is fully satisfied.
+
+ Only runs when Planner sets is_final=True.
+ - If approved: Return next_action="exit"
+ - If rejected: Return critique_feedback and next_action="replan"
+ """
+ execution_history = state.get("execution_history") or []
+ current_intent = state.get("current_intent") or ""
+ is_final = state.get("is_final", False)
+
+ # Safety check: Critic should only run when planner claims done
+ if not is_final:
+ logger.warning("Critic invoked but is_final=False; defaulting to replan")
+ return {
+ "next_action": NextAction.REPLAN,
+ "critique_feedback": "Planner has not completed the workflow.",
+ }
+
+ history_text = "\n\n".join(execution_history) if execution_history else "(Empty)"
+
+ system_prompt = (
+ "You are a gatekeeper critic for an iterative financial planning system.\n\n"
+ "**Your Role**: Compare the User's Request (current_intent) with the Execution History.\n"
+ "- If the goal is fully satisfied, approve (approved=True).\n"
+ "- If something is missing or incomplete, reject (approved=False) and provide specific feedback.\n\n"
+ "**Decision Criteria**:\n"
+ "1. All requested tasks completed successfully.\n"
+ "2. No critical errors that prevent goal satisfaction.\n"
+ "3. Results align with user's intent (current_intent).\n"
+ "4. **Synthesis Phase**: If sufficient research/data-gathering tasks are complete to answer the user's request, "
+ "APPROVE the plan. The system will synthesize the final response from the execution history. "
+ "Do NOT demand an explicit 'generate_report' or 'create_plan' task when the necessary data is already available.\n"
+ )
+
+ user_msg = f"""# TARGET GOAL (User Intent):
+"{current_intent}"
+
+# ACTUAL EXECUTION LOG:
+{history_text}
+
+# INSTRUCTION:
+Check if the "ACTUAL EXECUTION LOG" provides enough evidence to fulfill the "TARGET GOAL"
+"""
+
+ try:
+ agent = Agent(
+ model=OpenRouter(id="google/gemini-2.5-flash"),
+ instructions=[system_prompt],
+ markdown=False,
+ output_schema=CriticDecision,
+ debug_mode=True,
+ add_datetime_to_context=True,
+ )
+ response = await agent.arun(user_msg)
+ decision: CriticDecision = response.content
+
+ if decision.approved:
+ logger.info("Critic APPROVED: {r}", r=decision.reason)
+ return {
+ "next_action": NextAction.EXIT,
+ "_critic_summary": {"approved": True, "reason": decision.reason},
+ }
+ else:
+ logger.info("Critic REJECTED: {r}", r=decision.reason)
+ return {
+ "next_action": NextAction.REPLAN,
+ "critique_feedback": decision.feedback or decision.reason,
+ "is_final": False, # Reset is_final to allow re-planning
+ "_critic_summary": {"approved": False, "reason": decision.reason},
+ }
+ except Exception as exc:
+ logger.warning("Critic agent error: {err}", err=str(exc))
+ # On error, default to replan for safety
+ return {
+ "next_action": NextAction.REPLAN,
+ "critique_feedback": f"Critic error: {str(exc)[:100]}",
+ "is_final": False,
+ }
diff --git a/python/valuecell/agents/react_agent/nodes/executor.py b/python/valuecell/agents/react_agent/nodes/executor.py
new file mode 100644
index 000000000..7f9ee884e
--- /dev/null
+++ b/python/valuecell/agents/react_agent/nodes/executor.py
@@ -0,0 +1,177 @@
+from __future__ import annotations
+
+import json
+from collections.abc import Callable
+from typing import Any
+
+from langchain_core.callbacks import adispatch_custom_event
+from langchain_core.runnables import RunnableConfig
+from loguru import logger
+from pydantic import BaseModel
+
+from ...research_agent.sources import (
+ search_crypto_people,
+ search_crypto_projects,
+ search_crypto_vcs,
+ web_search,
+)
+from ..context import TaskContext
+from ..models import ExecutorResult
+from ..state import AgentState
+from ..tool_registry import registry
+from ..tools.alphavantage import (
+ get_financial_metrics,
+ get_market_sentiment,
+ get_stock_profile,
+)
+
+# from ..tools.research import research
+
+_TOOLS_REGISTERED = False
+
+
+def ensure_default_tools_registered() -> None:
+ global _TOOLS_REGISTERED
+ if _TOOLS_REGISTERED:
+ return
+
+ # _register_tool("research", research)
+ _register_tool("search_crypto_people", search_crypto_people)
+ _register_tool("search_crypto_projects", search_crypto_projects)
+ _register_tool("search_crypto_vcs", search_crypto_vcs)
+ _register_tool("web_search", web_search)
+ _register_tool("get_financial_metrics", get_financial_metrics)
+ _register_tool("get_stock_profile", get_stock_profile)
+ _register_tool("get_market_sentiment", get_market_sentiment)
+
+ _TOOLS_REGISTERED = True
+
+
+def _register_tool(
+ tool_id: str,
+ func: Callable[..., Any],
+ description: str | None = None,
+ *,
+ args_schema: type[BaseModel] | None = None,
+) -> None:
+ try:
+ registry.register(
+ tool_id,
+ func,
+ description,
+ args_schema=args_schema,
+ )
+ except ValueError:
+ # Already registered; ignore to keep idempotent
+ pass
+
+
+async def executor_node(
+ state: AgentState, task: dict[str, Any], config: RunnableConfig
+) -> dict[str, Any]:
+ """Execute a single task and return execution summary for history.
+
+ Args:
+ state: Current agent state
+ task: Task dictionary containing id, tool_name, tool_args, description
+ config: RunnableConfig injected by LangGraph for event dispatch
+
+ Returns:
+ - completed_tasks: {task_id: ExecutorResult}
+ - execution_history: [concise summary string]
+ """
+ task_id = task.get("id") or ""
+ task_description = task.get("description") or ""
+ tool_name = task.get("tool_name") or ""
+ tool_args = task.get("tool_args") or {}
+ task_brief = (
+ f"Task `{task_description}` (id={task_id}, tool={tool_name}, args={tool_args})"
+ )
+
+ logger.info("Executor start: {task_brief}", task_brief=task_brief)
+
+ # Idempotency guard: if this task is already completed, no-op
+ completed_snapshot = (state.get("completed_tasks") or {}).keys()
+ if task_id and task_id in completed_snapshot:
+ logger.info(
+ "Executor skip (already completed): {task_brief}", task_brief=task_brief
+ )
+ return {}
+
+ try:
+ # Create task context binding task_id and config
+ ctx = TaskContext(task_id=task_id, config=config)
+
+ # Pass state and context to registry.execute
+ runtime_args = {"state": state, "context": ctx}
+ result = await registry.execute(tool_name, tool_args, runtime_args=runtime_args)
+ exec_res = ExecutorResult(
+ task_id=task_id,
+ ok=True,
+ result=result,
+ tool_name=tool_name,
+ description=task_description,
+ )
+
+ if isinstance(result, (dict, list)):
+ result_str = json.dumps(result, ensure_ascii=False)
+ else:
+ result_str = str(result)
+
+ if len(result_str) > 800:
+ result_preview = result_str[:800] + "... (truncated)"
+ else:
+ result_preview = result_str
+
+ summary = f"""
+ {task_description}
+ {"SUCCESS" if exec_res.ok else "FAILURE"}
+
+{result_preview}
+
+
+"""
+ except Exception as exc:
+ logger.warning("Executor error: {err}", err=str(exc))
+ exec_res = ExecutorResult(
+ task_id=task_id,
+ ok=False,
+ error=str(exc),
+ error_code="ERR_EXEC",
+ tool_name=tool_name,
+ description=task_description,
+ )
+ summary = f"{task_brief} failed: {str(exc)[:50]}"
+
+ # Return delta for completed_tasks and execution_history
+ completed_delta = {task_id: exec_res.model_dump()}
+
+ # Emit a task-done event so the LangGraph event stream clearly shows completion
+ try:
+ await adispatch_custom_event(
+ "agno_event",
+ {"type": "task_done", "task_id": task_id, "ok": exec_res.ok},
+ )
+ except Exception:
+ pass
+
+ return {
+ "completed_tasks": completed_delta,
+ "execution_history": [summary],
+ }
+
+
+# TODO: display progress updates from within execution
+async def _emit_progress(percent: int, msg: str) -> None:
+ try:
+ payload = {
+ "type": "progress",
+ "payload": {"percent": percent, "msg": msg},
+ "node": "executor",
+ }
+ await adispatch_custom_event("agno_event", payload)
+ except Exception:
+ # progress emission is non-critical
+ pass
+
+
+ensure_default_tools_registered()
diff --git a/python/valuecell/agents/react_agent/nodes/inquirer.py b/python/valuecell/agents/react_agent/nodes/inquirer.py
new file mode 100644
index 000000000..1576de80a
--- /dev/null
+++ b/python/valuecell/agents/react_agent/nodes/inquirer.py
@@ -0,0 +1,240 @@
+from __future__ import annotations
+
+from typing import Any
+
+from agno.agent import Agent
+from agno.models.openrouter import OpenRouter
+from langchain_core.messages import AIMessage, SystemMessage
+from loguru import logger
+
+from ..models import InquirerDecision
+
+
+# TODO: summarize with LLM
+# TODO: add user memory
+def _compress_history(history: list[str]) -> str:
+ """Compress long execution history to prevent token explosion.
+
+ Args:
+ history: List of execution history strings
+
+ Returns:
+ Single compressed summary string
+ """
+ # Simple compression: Keep first 3 and last 3 entries
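+    # e.g. 10 entries -> [header, e1, e2, e3, "... (4 steps omitted) ...", e8, e9, e10]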
+ if len(history) <= 6:
+ return "\n".join(history)
+
+ compressed = [
+ "[Execution History - Compressed]",
+ *history[:3],
+ f"... ({len(history) - 6} steps omitted) ...",
+ *history[-3:],
+ ]
+ return "\n".join(compressed)
+
+
+def _trim_messages(messages: list, max_messages: int = 10) -> list:
+ """Keep only the last N messages to prevent token overflow.
+
+ Always preserves system messages.
+ """
+ if len(messages) <= max_messages:
+ return messages
+
+ # Separate system messages from others
+ system_msgs = [m for m in messages if isinstance(m, SystemMessage)]
+ other_msgs = [m for m in messages if not isinstance(m, SystemMessage)]
+
+    # Keep the last (max_messages - len(system_msgs)) other messages; guard the
+    # budget so a value of zero does not slice as [-0:], which keeps everything.
+    keep = max(max_messages - len(system_msgs), 0)
+    trimmed_others = other_msgs[-keep:] if keep else []
+
+ return system_msgs + trimmed_others
+
+
+async def inquirer_node(state: dict[str, Any]) -> dict[str, Any]:
+ """Smart Inquirer: Extracts natural language intent from conversation.
+
+ Produces a single comprehensive sentence describing user's immediate goal.
+ Resolves pronouns and context using conversation and execution history.
+
+ Multi-turn conversation logic:
+ 1. **New Task**: User starts new analysis -> Generate clear intent
+ 2. **Follow-up**: User asks about prior results -> Resolve references and generate focused intent
+ 3. **Chat**: User casual talk (e.g., "Thanks") -> Direct response, no planning
+
+ Inputs: state["messages"], state["current_intent"], state["execution_history"].
+ Outputs: Updated state with current_intent (natural language string) or chat response.
+ """
+ messages = state.get("messages") or []
+ current_intent = state.get("current_intent")
+ execution_history = state.get("execution_history") or []
+ turns = int(state.get("inquirer_turns") or 0)
+
+ # Trim messages to prevent token overflow
+ trimmed_messages = _trim_messages(messages, max_messages=10)
+
+ logger.info(
+ "Inquirer start: turns={t}, current_intent={i}, history_len={h}",
+ t=turns,
+ i=current_intent or "None",
+ h=len(execution_history),
+ )
+
+ # Extract recent execution history for context (last 3 items)
+ recent_history = execution_history[-3:] if execution_history else []
+ history_context = (
+ "\n".join(recent_history) if recent_history else "(No execution history yet)"
+ )
+
+ system_prompt = (
+ "You are the **Intent Interpreter** for a Financial Advisor Assistant.\n"
+ "Your job is to translate the conversation history into a single, unambiguous instruction for the Planner.\n\n"
+ f"# CONTEXT SNAPSHOT:\n"
+ f"- **Last Active Intent**: {current_intent or 'None'}\n"
+ f"- **Recent Actions**: {history_context}\n\n"
+ "# OUTPUT INSTRUCTIONS:\n"
+ "1. **current_intent**: A standalone natural language sentence describing exactly what to do next. MUST resolve all pronouns (it, they, that) using context.\n"
+ "2. **status**: 'PLAN' (if analysis needed) or 'CHAT' (if casual greeting).\n\n"
+ "# DECISION PATTERNS:\n\n"
+ "## 1. CHAT (No Analysis Needed)\n"
+ "- **Input**: 'Hello', 'Thanks', 'Okay'.\n"
+ "- **Output**: status='CHAT', response_to_user='[Polite Reply]'\n\n"
+ "## 2. PLAN (Analysis Needed) -> Output `current_intent`\n\n"
+ "### Case A: Starting Fresh / Switching Topic\n"
+ "- Input: 'Analyze Apple', 'Forget Apple, look at Tesla'.\n"
+ "- Action: Output the new intent directly.\n"
+ "- Example: 'Analyze Apple stock price and fundamentals'\n\n"
+ "### Case B: Refining / Comparing (Context Merging)\n"
+ "- **Context**: Analyzing Apple\n"
+ "- Input: 'Compare with Microsoft'\n"
+ "- **Rule**: Combine old + new. Do NOT drop the old asset unless told to.\n"
+ "- Example: 'Compare Apple and Microsoft stock performance'\n\n"
+ "### Case C: Follow-up Questions (Pronoun Resolution)\n"
+ "- **Context**: Analyzing Apple\n"
+ "- Input: 'Why did **it** drop?'\n"
+ "- **Rule**: Replace 'it' with the context subject.\n"
+ "- Example: 'Find reasons for Apple stock price drop'\n\n"
+ "### Case D: Deep Dive (Specifics)\n"
+ "- **Context**: Apple revenue is up 10%\n"
+ "- Input: 'Tell me more about **that revenue growth**'\n"
+ "- **Rule**: Be specific. Don't just say 'Analyze Apple'.\n"
+ "- Example: 'Analyze details and drivers of Apple's recent revenue growth'\n"
+ )
+
+ # Build user message from conversation history
+ message_strs = []
+ for m in trimmed_messages:
+ role = getattr(m, "type", "unknown")
+ content = getattr(m, "content", str(m))
+ message_strs.append(f"[{role}]: {content}")
+
+ conversation_text = (
+ "\n\n".join(message_strs) if message_strs else "(No conversation yet)"
+ )
+ user_msg = (
+ "# Conversation History:\n"
+ f"{conversation_text}\n\n"
+ "# Execution Context:\n"
+ f"Recent execution summary is already injected in CURRENT STATE above. "
+ f"Use it to understand what data/analysis has already been completed."
+ )
+
+ try:
+ agent = Agent(
+ model=OpenRouter(id="google/gemini-2.5-flash"),
+ instructions=[system_prompt],
+ markdown=False,
+ output_schema=InquirerDecision,
+ debug_mode=True,
+ add_datetime_to_context=True,
+ )
+ response = await agent.arun(user_msg)
+ decision: InquirerDecision = response.content
+
+ logger.info(
+ "Inquirer decision: status={s}, intent={i}, reason={r}",
+ s=decision.status,
+ i=decision.current_intent,
+ r=decision.reasoning,
+ )
+
+ # --- Simplified State Update Logic: Direct Application ---
+ updates: dict[str, Any] = {}
+
+ # CASE 1: CHAT - Direct response, no planning
+ if decision.status == "CHAT":
+ return {
+ "messages": [
+ AIMessage(content=decision.response_to_user or "Understood.")
+ ],
+ "current_intent": None, # Signal to route to END
+ "inquirer_turns": 0,
+ }
+
+ # NOTE: RESET status removed. Intent switches are represented as PLAN
+ # with a new `current_intent`. The Planner will decide whether to reuse
+ # history or re-fetch data as appropriate.
+
+    # CASE 2: PLAN - Apply the current intent directly
+ if decision.current_intent:
+ updates["current_intent"] = decision.current_intent
+ logger.info(
+ "Inquirer: PLAN - Intent set to: {i}",
+ i=decision.current_intent,
+ )
+ elif current_intent:
+ # Fallback: LLM didn't return intent but we have existing context
+ updates["current_intent"] = current_intent
+ logger.info("Inquirer: PLAN - Preserving existing intent")
+ else:
+ # No intent at all - shouldn't happen in PLAN status
+ logger.warning("Inquirer: PLAN with no intent - asking for clarification")
+ return {
+ "current_intent": None,
+ "inquirer_turns": 0,
+ "messages": [
+ AIMessage(
+ content="I didn't quite understand. What would you like to analyze?"
+ )
+ ],
+ }
+
+ # Force replanning
+ updates["is_final"] = False
+
+ # History Compression (Garbage Collection)
+ current_history = state.get("execution_history") or []
+ if len(current_history) > 20:
+ logger.warning(
+ "Execution history too long ({n} entries), compressing...",
+ n=len(current_history),
+ )
+ compressed = _compress_history(current_history)
+ updates["execution_history"] = [compressed]
+
+ updates["inquirer_turns"] = 0 # Reset turn counter
+ return updates
+
+ except Exception as exc:
+ logger.exception("Inquirer LLM error: {err}", err=str(exc))
+
+ # Graceful fallback
+ if current_intent:
+ # If we have an intent, assume user wants to continue
+ return {
+ "current_intent": current_intent,
+ "inquirer_turns": 0,
+ "is_final": False,
+ }
+ else:
+ # Ask user to retry
+ return {
+ "inquirer_turns": 0,
+ "current_intent": None,
+ "messages": [
+ AIMessage(
+ content="I didn't quite understand. Could you tell me what you'd like to analyze?"
+ )
+ ],
+ }
diff --git a/python/valuecell/agents/react_agent/nodes/planner.py b/python/valuecell/agents/react_agent/nodes/planner.py
new file mode 100644
index 000000000..55bceb725
--- /dev/null
+++ b/python/valuecell/agents/react_agent/nodes/planner.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+from typing import Any
+
+from agno.agent import Agent
+from agno.models.openrouter import OpenRouter
+from langchain_core.messages import AIMessage, HumanMessage
+from loguru import logger
+
+from valuecell.utils.uuid import generate_task_id
+
+from ..models import ExecutionPlan, Task
+from ..tool_registry import registry
+
+
+# TODO: route human-in-the-loop feedback to user
+async def planner_node(state: dict[str, Any]) -> dict[str, Any]:
+ """Iterative batch planner: generates the IMMEDIATE next batch of tasks.
+
+ Uses natural language current_intent as the primary instruction.
+ Looks at execution_history to understand what has been done,
+ and critique_feedback to fix any issues.
+ """
+ current_intent = state.get("current_intent") or "General financial analysis"
+ execution_history = state.get("execution_history") or []
+ critique_feedback = state.get("critique_feedback")
+
+ logger.info(
+ "Planner start: intent='{i}', history_len={h}",
+ i=current_intent,
+ h=len(execution_history),
+ )
+
+ # Build iterative planning prompt
+ tool_context = registry.get_prompt_context()
+
+ history_text = (
+ "\n\n".join(execution_history) if execution_history else "(No history yet)"
+ )
+ feedback_text = (
+ f"\n\n**Critic Feedback**: {critique_feedback}" if critique_feedback else ""
+ )
+
+ # 1. Extract recent conversation (last Assistant + User messages)
+ messages_list = state.get("messages", []) or []
+ recent_msgs: list[tuple[str, str]] = []
+ for m in messages_list:
+ # support both Message objects and plain dicts
+ if isinstance(m, (HumanMessage, AIMessage)):
+ role = "User" if isinstance(m, HumanMessage) else "Assistant"
+ recent_msgs.append((role, m.content))
+
+ # Keep only the last 3 relevant messages (AI/User pairs preferred)
+ recent_msgs = recent_msgs[-3:]
+ if recent_msgs:
+ context_str = "\n\n".join(f"{r}: {c}" for r, c in recent_msgs)
+ recent_context_text = f"**RECENT CONVERSATION**:\n{context_str}\n(Use this context to resolve references. If user asks about a phrase mentioned by the Assistant, target your research to verify or expand on that claim.)\n\n"
+ else:
+ recent_context_text = ""
+
+ system_prompt_text = (
+ "You are an iterative financial planning agent.\n\n"
+ f"**CURRENT GOAL**: {current_intent}\n\n"
+ "**Your Role**: Decide the **IMMEDIATE next batch** of tasks to achieve this goal.\n\n"
+ f"**Available Tools**:\n{tool_context}\n\n"
+ "**Planning Rules**:\n"
+ "1. **Iterative Planning**: Plan only the next step(s), not the entire workflow.\n"
+ "2. **Context Awareness**: Read the Execution History carefully. Don't repeat completed work.\n"
+ "3. **Relevance & Freshness**:\n"
+ " - If user asks 'latest', 'today', or 'recent news' -> Check if history data is fresh (from current turn).\n"
+ " - If history only has old/generic data from previous turns, GENERATE NEW TASKS.\n"
+ " - Be skeptical of old data. When in doubt, fetch fresh data rather than stale data.\n"
+ "4. **Concrete Arguments**: tool_args must contain only literal values (no placeholders).\n"
+ "5. **Parallel Execution**: Tasks in the same batch run concurrently.\n"
+ "6. **Completion Signal**: Return `tasks=[]` and `is_final=True` only when the goal is fully satisfied.\n"
+ "7. **Critique Integration**: If Critic Feedback is present, address the issues mentioned.\n\n"
+ f"{recent_context_text}**Execution History**:\n{history_text}{feedback_text}\n"
+ )
+
+ user_msg = f"Current Goal: {current_intent}"
+
+ # TODO: organize plan like a TODO list
+ try:
+ agent = Agent(
+ model=OpenRouter(id="google/gemini-2.5-flash"),
+ instructions=[system_prompt_text],
+ markdown=False,
+ output_schema=ExecutionPlan,
+ debug_mode=True,
+ add_datetime_to_context=True,
+ )
+ response = await agent.arun(user_msg)
+ plan_obj: ExecutionPlan = response.content
+
+ planned_tasks = plan_obj.tasks
+ strategy_update = plan_obj.strategy_update
+ is_final = plan_obj.is_final
+
+ logger.info(
+ "Planner produced {} tasks, is_final={}, strategy: {}",
+ len(planned_tasks),
+ is_final,
+ strategy_update,
+ )
+ except Exception as exc:
+ logger.warning("Planner Agno error: {err}", err=str(exc))
+ planned_tasks = []
+ strategy_update = "No plan produced due to Agno/LLM error."
+ is_final = False
+
+ # Validate tool registration and convert to internal Task models
+ tasks: list[Task] = []
+ available = {tool.tool_id for tool in registry.list_tools()}
+ for pt in planned_tasks:
+ if pt.tool_id not in available:
+ raise ValueError(f"Planner produced unknown tool_id: {pt.tool_id}")
+
+ tasks.append(
+ Task(
+ id=generate_task_id(),
+ tool_name=pt.tool_id,
+ tool_args=pt.tool_args,
+ description=pt.description or "No description provided by planner",
+ )
+ )
+
+ _validate_plan(tasks)
+
+ # Clear critique_feedback after consuming it
+ return {
+ "plan": [t.model_dump() for t in tasks],
+ "strategy_update": strategy_update,
+ "is_final": is_final,
+ "critique_feedback": None, # Clear after consuming
+ }
+
+
+def _validate_plan(tasks: list[Task]) -> None:
+ """Basic validation: check for duplicate task IDs."""
+ ids = set()
+ for t in tasks:
+ if t.id in ids:
+ raise ValueError(f"Duplicate task id: {t.id}")
+ ids.add(t.id)
diff --git a/python/valuecell/agents/react_agent/nodes/summarizer.py b/python/valuecell/agents/react_agent/nodes/summarizer.py
new file mode 100644
index 000000000..9abbc2902
--- /dev/null
+++ b/python/valuecell/agents/react_agent/nodes/summarizer.py
@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+import json
+import os
+from typing import Any
+
+from langchain_core.messages import AIMessage
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_openai import ChatOpenAI
+from loguru import logger
+
+from valuecell.utils import i18n_utils
+
+from ..state import AgentState
+
+
+async def summarizer_node(state: AgentState) -> dict[str, Any]:
+ """
+ Generate a polished final report using LangChain native model for streaming.
+
+ Uses natural language current_intent to understand user's goal.
+ """
+ current_intent = state.get("current_intent") or "General financial analysis"
+ completed_tasks = state.get("completed_tasks") or {}
+ user_context = state.get("user_context") or {}
+ current_datetime = i18n_utils.format_utc_datetime(i18n_utils.get_utc_now())
+
+ logger.info(
+ "Summarizer start: intent='{i}', tasks={t}",
+ i=current_intent,
+ t=len(completed_tasks),
+ )
+
+ # 1. Extract context
+ data_summary = _extract_key_results(completed_tasks)
+
+ # 2. Build prompt with current_intent and adaptive formatting
+ # Note: intent analysis (is_comparison, is_question) can be used in future
+ # to select conditional structure; for now provide flexible formatting guidelines
+ system_template = """
+You are a concise Financial Assistant for beginner investors.
+Your goal is to synthesize execution results to answer the user's specific goal.
+
+**User's Current Goal**:
+{current_intent}
+
+**User Context**:
+{user_context}
+
+**Current Date and Time**: {current_datetime}
+
+**Available Data** (Execution Results):
+{data_summary}
+
+**Strict Constraints**:
+1. **Source of Truth**: Use the data provided in "Available Data" above as your single source.
+2. **Length Limit**: Keep the total response under 400 words. Be ruthless with cutting fluff.
+3. **Relevance Check**: Ensure you address the user's stated goal completely.
+4. **Completeness Check**: You MUST surface data errors explicitly.
+ - If data is missing or mismatched (e.g. "content seems to be AMD" when user asked for "AAPL"),
+ you MUST write: "⚠️ Data Retrieval Issue: [Details]"
+5. **No Generic Intros**: Start directly with the answer.
+6. **Adaptive Structure**:
+ - **General Analysis**: Use "Key Findings → Analysis → Risk Note" structure.
+ - **Comparison**: Use "Side-by-Side" approach. Highlight key differences and similarities.
+ - **Specific Question**: Answer DIRECTLY. No forced headers if not relevant.
+7. **Markdown**: Always use Markdown. Bold key metrics or information.
+8. **Truthfulness**: If data is missing, state it explicitly: "Data not available for [X]".
+"""
+
+ prompt = ChatPromptTemplate.from_messages(
+ [("system", system_template), ("human", "Please generate the final report.")]
+ )
+
+ # 3. Initialize LangChain Model (Native Streaming Support)
+ # Using ChatOpenAI to connect to OpenRouter (compatible API)
+ llm = ChatOpenAI(
+ model="google/gemini-2.5-flash",
+ base_url="https://openrouter.ai/api/v1",
+ api_key=os.getenv("OPENROUTER_API_KEY"), # Ensure ENV is set
+ temperature=0,
+ streaming=True, # Crucial for astream_events
+ )
+
+ chain = prompt | llm
+
+ try:
+ # 4. Invoke Chain
+ # LangGraph automatically captures 'on_chat_model_stream' events here
+ response = await chain.ainvoke(
+ {
+ "current_intent": current_intent,
+ "data_summary": data_summary,
+ "user_context": user_context,
+ "current_datetime": current_datetime,
+ }
+ )
+
+ report_content = response.content
+ logger.info("Summarizer completed: len={l}", l=len(report_content))
+
+ return {
+ "messages": [AIMessage(content=report_content)],
+ "is_final": True,
+ "_summarizer_complete": True,
+ }
+
+ except Exception as exc:
+ logger.exception("Summarizer error: {err}", err=str(exc))
+ return {
+ "messages": [
+ AIMessage(
+ content="I encountered an error generating the report. Please check the execution logs."
+ )
+ ],
+ "is_final": True,
+ }
+
+
+def _extract_key_results(completed_tasks: dict[str, Any]) -> str:
+ """Extract results with JSON formatting and error highlighting.
+
+ Prefers JSON for structured data (dicts/lists) for better LLM comprehension.
+ Falls back to string representation for simple values.
+ """
+ if not completed_tasks:
+ return "(No results available)"
+
+ lines = []
+ for task_id, task_data in completed_tasks.items():
+ if not isinstance(task_data, dict):
+ continue
+
+ result = task_data.get("result")
+ desc = task_data.get("description") or ""
+
+ # Handle errors reported by Executor
+ if task_data.get("error"):
+ error_msg = task_data["error"]
+ error_code = task_data.get("error_code", "")
+ error_info = f"**Error**: {error_msg}"
+ if error_code:
+ error_info += f" (Code: {error_code})"
+ lines.append(f"### Task {task_id} [FAILED]\n{error_info}")
+ continue
+
+ if not result:
+ continue
+
+ # Prefer JSON formatting for structured data; fallback to str() for simple values
+ if isinstance(result, (dict, list)):
+ try:
+ preview = json.dumps(result, ensure_ascii=False, indent=2)
+ except (TypeError, ValueError):
+ preview = str(result)
+ else:
+ preview = str(result)
+
+ # Slightly higher truncation limit to preserve structured data context
+ if len(preview) > 1000:
+ preview = preview[:1000] + "\n... (truncated)"
+
+ # Build header with description (critical for Summarizer to understand task purpose)
+ header = f"### Task {task_id}"
+ if desc:
+ header += f": {desc}"
+
+ lines.append(f"{header}\n{preview}")
+
+ return "\n\n".join(lines)
diff --git a/python/valuecell/agents/react_agent/state.py b/python/valuecell/agents/react_agent/state.py
new file mode 100644
index 000000000..f15813e06
--- /dev/null
+++ b/python/valuecell/agents/react_agent/state.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+import operator
+from typing import Annotated, Any, List, TypedDict
+
+from langchain_core.messages import BaseMessage
+
+from .models import ARG_VAL_TYPES
+
+
+class AgentState(TypedDict, total=False):
+ # Conversation and intent
+ messages: Annotated[List[BaseMessage], operator.add]
+ current_intent: str | None # Natural language description of user's immediate goal
+ inquirer_turns: int
+
+ # Planning (iterative batch planning)
+ plan: list[dict[str, Any]] | None # Current batch of tasks
+ strategy_update: str | None # Latest planner reasoning summary
+
+ # Execution results (merged across parallel executors)
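+    # `operator.ior` applies `left |= right`, so {task_id: result} deltas from
+    # concurrent executor branches merge instead of replacing the mapping.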
+    completed_tasks: Annotated[dict[str, Any], operator.ior]
+
+ # Iterative planning: growing list of execution summaries
+    execution_history: Annotated[list[str], operator.add]
+
+ # Feedback from Critic to guide next planning iteration
+ critique_feedback: str | None
+
+ # Flag to signal Planner believes goal is complete
+ is_final: bool
+
+ # Critic decision
+ next_action: Any | None
+ _critic_summary: Any | None
+
+ # User context / preferences (optional)
+ user_context: dict[str, ARG_VAL_TYPES] | None
diff --git a/python/valuecell/agents/react_agent/tool_registry.py b/python/valuecell/agents/react_agent/tool_registry.py
new file mode 100644
index 000000000..40713fc52
--- /dev/null
+++ b/python/valuecell/agents/react_agent/tool_registry.py
@@ -0,0 +1,249 @@
+from __future__ import annotations
+
+import inspect
+from collections.abc import Callable
+from typing import Any, Optional, Type
+
+from loguru import logger
+from pydantic import BaseModel, ConfigDict, create_model
+
+CallableType = Callable[..., Any]
+
+
+class ToolDefinition(BaseModel):
+ """Describe a callable tool with metadata for planning and execution."""
+
+ tool_id: str
+ name: str
+ description: str
+ args_schema: Type[BaseModel] | None
+ func: CallableType
+ is_agent: bool = False
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+
+class ToolRegistry:
+ """Registry that keeps tool metadata and offers unified execution."""
+
+ def __init__(self) -> None:
+ self._registry: dict[str, ToolDefinition] = {}
+
+ def register(
+ self,
+ tool_id: str,
+ func: CallableType,
+ description: str | None = None,
+ *,
+ args_schema: Type[BaseModel] | None = None,
+ name: Optional[str] = None,
+ is_agent: bool = False,
+ ) -> None:
+ """Register a callable tool with optional schema reflection."""
+ if tool_id in self._registry:
+ raise ValueError(f"Tool '{tool_id}' already registered")
+ # Infer description from function docstring if missing
+ if description is None:
+ if getattr(func, "__doc__", None):
+ description = func.__doc__.strip().split("\n")[0]
+ else:
+ description = f"Execute tool {tool_id}."
+
+ schema = args_schema or self._infer_schema(func)
+ tool_name = name or tool_id.replace("_", " ").title()
+ definition = ToolDefinition(
+ tool_id=tool_id,
+ name=tool_name,
+ description=description,
+ args_schema=schema,
+ func=func,
+ is_agent=is_agent,
+ )
+ self._registry[tool_id] = definition
+ logger.info("Tool registered: {tool_id}", tool_id=tool_id)
+
+ def get_tool(self, tool_id: str) -> ToolDefinition:
+ """Return the tool definition or raise if missing."""
+ try:
+ return self._registry[tool_id]
+ except KeyError as exc:
+ raise ValueError(f"Tool '{tool_id}' not found") from exc
+
+ async def execute(
+ self,
+ tool_id: str,
+ tool_args: dict[str, Any] | None = None,
+ *,
+ runtime_args: dict[str, Any] | None = None,
+ ) -> Any:
+ """Execute a registered tool with validated arguments."""
+ tool_def = self.get_tool(tool_id)
+ args = tool_args or {}
+ params = self._validate_args(tool_def.args_schema, args)
+ if runtime_args:
+ params.update(self._filter_runtime_args(tool_def.func, runtime_args))
+
+ result = await self._call(tool_def.func, params)
+ return result
+
+ def list_tools(self) -> list[ToolDefinition]:
+ """Return registered tools sorted by identifier."""
+ return [self._registry[k] for k in sorted(self._registry.keys())]
+
+ def get_prompt_context(self) -> str:
+ """Generate a planner-friendly summary of available tools."""
+ lines: list[str] = ["Available Tools:"]
+
+ def _json_type_to_py(t: Any, prop: dict) -> str:
+ # Map JSON schema types to concise Python-like types
+ if isinstance(t, list):
+ # e.g., ["string","null"] -> Optional[str]
+ non_null = [x for x in t if x != "null"]
+ if len(non_null) == 1:
+ return f"Optional[{_json_type_to_py(non_null[0], prop)}]"
+ return "Any"
+ if t == "string":
+ return "str"
+ if t == "integer":
+ return "int"
+ if t == "number":
+ return "float"
+ if t == "boolean":
+ return "bool"
+ if t == "object":
+ return "dict"
+ if t == "array":
+ items = prop.get("items") or {}
+ item_type = items.get("type")
+ if not item_type and items.get("anyOf"):
+ # try first anyOf type
+ ao = items.get("anyOf")[0]
+ item_type = ao.get("type")
+ py_item = _json_type_to_py(item_type or "any", items)
+ return f"List[{py_item}]"
+ if t == "null":
+ return "None"
+ return "Any"
+
+ for tool in self.list_tools():
+ # Tool header
+ lines.append(f"- {tool.tool_id}: {tool.description}")
+
+ # Build concise signature from args_schema when available
+ if tool.args_schema:
+ try:
+ schema = tool.args_schema.model_json_schema()
+ props = schema.get("properties", {})
+ except Exception:
+ props = {}
+
+ if not props:
+ lines.append(" Arguments: ()")
+ continue
+
+ parts: list[str] = []
+ for name, prop in props.items():
+ ptype = prop.get("type")
+ # handle 'anyOf' at property level (e.g., [string, null])
+ if not ptype and prop.get("anyOf"):
+ # pick first non-null
+ types = [p.get("type") for p in prop.get("anyOf")]
+ ptype = types if types else None
+
+ py_type = _json_type_to_py(ptype or "any", prop)
+ default = prop.get("default")
+ if default is not None:
+ # represent strings with quotes, others as-is
+ if isinstance(default, str):
+ parts.append(f"{name}: {py_type} = '{default}'")
+ else:
+ parts.append(f"{name}: {py_type} = {default}")
+ else:
+ parts.append(f"{name}: {py_type}")
+
+ sig = ", ".join(parts)
+ lines.append(f" Arguments: ({sig})")
+ else:
+ lines.append(" Arguments: ()")
+
+ return "\n".join(lines)
+
+ @staticmethod
+ def _validate_args(
+ schema: Type[BaseModel] | None, args: dict[str, Any]
+ ) -> dict[str, Any]:
+ if schema is None:
+ return dict(args)
+ validated = schema(**args)
+ return validated.model_dump()
+
+ @staticmethod
+ def _filter_runtime_args(
+ func: CallableType, runtime_args: dict[str, Any]
+ ) -> dict[str, Any]:
+ try:
+ signature = inspect.signature(func)
+ except (TypeError, ValueError):
+ return dict(runtime_args)
+
+ accepted: dict[str, Any] = {}
+ for key, value in runtime_args.items():
+ if key in signature.parameters:
+ accepted[key] = value
+ return accepted
+
+ @staticmethod
+ async def _call(func: CallableType, params: dict[str, Any]) -> Any:
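+        # Try keyword expansion first; on TypeError, fall back to passing the
+        # whole mapping as a single positional argument (for tools that take
+        # one `params` dict). Note the except also catches TypeErrors raised
+        # inside the tool body, so tools should validate their own inputs.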
+ try:
+ result = func(**params)
+ except TypeError:
+ if params:
+ result = func(params)
+ else:
+ result = func()
+ return await ToolRegistry._maybe_await(result)
+
+ @staticmethod
+ async def _maybe_await(value: Any) -> Any:
+ if inspect.isawaitable(value):
+ return await value
+ return value
+
+ @staticmethod
+ def _infer_schema(func: CallableType) -> Type[BaseModel] | None:
+ try:
+ signature = inspect.signature(func)
+ except (TypeError, ValueError):
+ return None
+
+ fields: dict[str, tuple[type[Any], Any]] = {}
+ for name, param in signature.parameters.items():
+ # Skip self, cls and context (runtime-injected parameters)
+ if name in {"self", "cls", "context"}:
+ continue
+ if param.kind in {
+ inspect.Parameter.VAR_POSITIONAL,
+ inspect.Parameter.VAR_KEYWORD,
+ }:
+ return None
+ annotation = (
+ param.annotation
+ if param.annotation is not inspect.Signature.empty
+ else Any
+ )
+ default = (
+ param.default if param.default is not inspect.Signature.empty else ...
+ )
+ fields[name] = (annotation, default)
+
+ if not fields:
+ return None
+
+ model_name = f"{func.__name__.capitalize()}Args"
+ return create_model(model_name, **fields)
+
+
+registry = ToolRegistry()
+
+__all__ = ["ToolDefinition", "ToolRegistry", "registry"]
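+
+# Usage sketch (hypothetical `get_quote` tool; not part of this module):
+#
+#   async def get_quote(symbol: str) -> str:
+#       """Return the latest quote for a symbol."""
+#       ...
+#
+#   registry.register("get_quote", get_quote)  # schema inferred from signature
+#   result = await registry.execute("get_quote", {"symbol": "AAPL"})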
diff --git a/python/valuecell/agents/react_agent/tools/__init__.py b/python/valuecell/agents/react_agent/tools/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/valuecell/agents/react_agent/tools/alphavantage.py b/python/valuecell/agents/react_agent/tools/alphavantage.py
new file mode 100644
index 000000000..ee5acaf36
--- /dev/null
+++ b/python/valuecell/agents/react_agent/tools/alphavantage.py
@@ -0,0 +1,510 @@
+import asyncio
+import os
+from datetime import datetime
+from typing import Any, Literal, Optional
+
+import httpx
+import pandas as pd
+from loguru import logger
+
+from ..context import TaskContext
+
+
+async def _fetch_alpha_vantage(
+ function: str, symbol: str | None = None, extra_params: dict | None = None
+) -> dict[str, Any]:
+ """
+ Robust fetcher for AlphaVantage API.
+
+ Features:
+ - Handles 'symbol' vs 'tickers' parameter automatically.
+ - Detects API-level errors (Rate Limits, Invalid Token) even on HTTP 200.
+ - Merges extra parameters (e.g. limit, sort, outputsize).
+ """
+ api_key = os.getenv("ALPHA_VANTAGE_API_KEY")
+ if not api_key:
+ logger.error("Missing ALPHA_VANTAGE_API_KEY environment variable")
+ return {"error": "Configuration error: API key missing"}
+
+ base_url = "https://www.alphavantage.co/query"
+
+ # 1. Build Query Parameters
+ params = {"function": function, "apikey": api_key}
+
+ # Handle Symbol Mapping
+ # NEWS_SENTIMENT uses 'tickers', most others use 'symbol'
+ if symbol:
+ if function == "NEWS_SENTIMENT":
+ params["tickers"] = symbol
+ else:
+ params["symbol"] = symbol
+
+ if extra_params:
+ params.update(extra_params)
+
+ # 2. Execute Request
+ try:
+ async with httpx.AsyncClient(timeout=15.0) as client:
+ logger.debug(f"AlphaVantage Request: {function} for {symbol or 'General'}")
+
+ resp = await client.get(base_url, params=params)
+ resp.raise_for_status()
+ data = resp.json()
+
+ # 3. AlphaVantage Specific Error Handling
+ # AlphaVantage returns 200 OK even for errors, we must check the keys.
+
+ # Case A: Rate Limit Hit (Common on free tier)
+ # Usually contains "Note" or "Information"
+ if "Note" in data or "Information" in data:
+ msg = data.get("Note") or data.get("Information")
+ logger.warning(f"AlphaVantage Rate Limit/Info: {msg}")
+ # Optional: Implement retry logic here if needed
+ return {"error": f"API Rate Limit reached: {msg}"}
+
+ # Case B: Invalid API Call
+ if "Error Message" in data:
+ msg = data["Error Message"]
+ logger.error(f"AlphaVantage API Error: {msg}")
+ return {"error": f"Invalid API call: {msg}"}
+
+ # Case C: Empty Result (Symbol not found)
+ if not data:
+ return {"error": "No data returned (Symbol might be invalid)"}
+
+ return data
+
+ except httpx.TimeoutException:
+ logger.error("AlphaVantage Request Timed out")
+ return {"error": "Data provider request timed out"}
+
+ except Exception as exc:
+ logger.exception(f"AlphaVantage Unhandled Error: {exc}")
+ return {"error": str(exc)}
+
+
+async def get_financial_metrics(
+ symbol: str,
+ period: Literal["annual", "quarterly"] = "annual",
+ limit: int = 4,
+ context: Optional[TaskContext] = None,
+) -> str:
+ """
+ Retrieves detailed financial metrics for a stock symbol using AlphaVantage.
+ Automatically calculates ratios like Margins, ROE, and Debt/Equity.
+
+ Args:
+ symbol: The stock ticker (e.g., 'IBM', 'AAPL').
+ period: 'annual' for yearly reports, 'quarterly' for recent quarters.
+ limit: Number of periods to return (default 4). Keep low to save tokens.
+ """
+
+ # Sequentially fetch endpoints with short delays to avoid AlphaVantage "burst" rate-limiting.
+ try:
+ # Emit progress event: starting income statement fetch
+ if context:
+ await context.emit_progress(
+ f"Fetching Income Statement for {symbol}...", step="fetching_income"
+ )
+
+ # 1) Income Statement
+ data_inc = await _fetch_alpha_vantage(
+ symbol=symbol, function="INCOME_STATEMENT"
+ )
+ await asyncio.sleep(1.5)
+
+ # Emit progress event: starting balance sheet fetch
+ if context:
+ await context.emit_progress(
+ "Fetching Balance Sheet...", step="fetching_balance"
+ )
+
+ # 2) Balance Sheet
+ data_bal = await _fetch_alpha_vantage(symbol=symbol, function="BALANCE_SHEET")
+ await asyncio.sleep(1.5)
+
+ # Emit progress event: starting cash flow fetch
+ if context:
+ await context.emit_progress("Fetching Cash Flow...", step="fetching_cash")
+
+ # 3) Cash Flow
+ data_cash = await _fetch_alpha_vantage(symbol=symbol, function="CASH_FLOW")
+
+ # If any endpoint returned an API-level error, surface it for clarity.
+ if isinstance(data_inc, dict) and "error" in data_inc:
+ return f"API Error (Income): {data_inc['error']}"
+ if isinstance(data_bal, dict) and "error" in data_bal:
+ return f"API Error (Balance): {data_bal['error']}"
+ if isinstance(data_cash, dict) and "error" in data_cash:
+ return f"API Error (Cash): {data_cash['error']}"
+
+ except Exception as e:
+ return f"API Error: {str(e)}"
+
+ report_key = "annualReports" if period == "annual" else "quarterlyReports"
+
+ if not data_inc.get(report_key):
+ return f"No {period} financial data found for {symbol}."
+
+ def to_df(data_dict):
+ reports = data_dict.get(report_key, [])
+ if not reports:
+ return pd.DataFrame()
+ df = pd.DataFrame(reports)
+ df = df.replace("None", pd.NA)
+ if "fiscalDateEnding" in df.columns:
+ df["fiscalDateEnding"] = pd.to_datetime(df["fiscalDateEnding"])
+ df.set_index("fiscalDateEnding", inplace=True)
+ return df
+
+ df_inc = to_df(data_inc)
+ df_bal = to_df(data_bal)
+ df_cash = to_df(data_cash)
+
+ df_merged = pd.concat([df_inc, df_bal, df_cash], axis=1)
+ df_merged = df_merged.loc[:, ~df_merged.columns.duplicated()]
+ df_merged.sort_index(ascending=False, inplace=True)
+ df_final = df_merged.head(limit).copy()
+
+ cols_to_convert = df_final.columns.drop("reportedCurrency", errors="ignore")
+ for col in cols_to_convert:
+ df_final[col] = pd.to_numeric(df_final[col], errors="coerce")
+
+ try:
+ # Profitability
+ df_final["Gross Margin %"] = (
+ df_final["grossProfit"] / df_final["totalRevenue"]
+ ) * 100
+ df_final["Net Margin %"] = (
+ df_final["netIncome"] / df_final["totalRevenue"]
+ ) * 100
+
+ # Balance Sheet Health
+ # debt = shortTerm + longTerm
+ total_debt = df_final.get(
+ "shortLongTermDebtTotal",
+ df_final.get("shortTermDebt", 0) + df_final.get("longTermDebt", 0),
+ )
+ df_final["Total Debt"] = total_debt
+ df_final["Debt/Equity"] = total_debt / df_final["totalShareholderEquity"]
+
+ # Cash Flow
+ # Free Cash Flow = Operating Cash Flow - Capital Expenditures
+ df_final["Free Cash Flow"] = (
+ df_final["operatingCashflow"] - df_final["capitalExpenditures"]
+ )
+
+ except KeyError:
+ pass
+
+ df_display = df_final.T
+ metrics_map = {
+ "totalRevenue": "Revenue",
+ "grossProfit": "Gross Profit",
+ "netIncome": "Net Income",
+ "Gross Margin %": "Gross Margin %",
+ "Net Margin %": "Net Margin %",
+ "reportedEPS": "EPS",
+ "totalAssets": "Total Assets",
+ "totalShareholderEquity": "Total Equity",
+ "Total Debt": "Total Debt",
+ "Debt/Equity": "Debt/Equity Ratio",
+ "operatingCashflow": "Operating Cash Flow",
+ "Free Cash Flow": "Free Cash Flow",
+ }
+ existing_metrics = [m for m in metrics_map.keys() if m in df_display.index]
+ df_display = df_display.loc[existing_metrics]
+ df_display.rename(index=metrics_map, inplace=True)
+
+ def fmt_val(val, metric_name):
+ if pd.isna(val):
+ return "-"
+ if "%" in metric_name or "Ratio" in metric_name:
+ return f"{val:.2f}" + ("%" if "%" in metric_name else "x")
+ if abs(val) >= 1e9:
+ return f"${val / 1e9:.2f}B"
+ if abs(val) >= 1e6:
+ return f"${val / 1e6:.2f}M"
+ return f"{val:,.0f}"
+
+ for col in df_display.columns:
+ df_display[col] = df_display.apply(
+ lambda row: fmt_val(row[col], row.name), axis=1
+ )
+
+ df_display.columns = [d.strftime("%Y-%m-%d") for d in df_display.columns]
+
+ md_table = df_display.to_markdown()
+
+ return (
+ f"### Financial Metrics ({period.title()}, Last {limit} periods)\n\n{md_table}"
+ )
+
+
+async def get_stock_profile(symbol: str, context: Optional[TaskContext] = None) -> str:
+ """
+ Retrieves a comprehensive profile for a stock symbol.
+ Includes company description, sector, real-time price, valuation metrics (PE, Market Cap),
+ and analyst ratings.
+ """
+    # Fetch sequentially with a short pause to avoid AlphaVantage burst detection
+ try:
+ # Emit progress event: starting quote fetch
+ if context:
+ await context.emit_progress(
+ f"Fetching real-time quote for {symbol}...", step="fetching_quote"
+ )
+
+ # 1. Global Quote
+ quote_data = await _fetch_alpha_vantage(symbol=symbol, function="GLOBAL_QUOTE")
+ # Pause to avoid rapid-fire requests triggering rate limits
+ await asyncio.sleep(1.5)
+
+ # Emit progress event: starting overview fetch
+ if context:
+ await context.emit_progress(
+ "Fetching company overview...", step="fetching_overview"
+ )
+
+ # 2. Overview
+ overview_data = await _fetch_alpha_vantage(symbol=symbol, function="OVERVIEW")
+ except Exception as e:
+ return f"Error fetching profile for {symbol}: {str(e)}"
+
+ # --- Parse the quote response ---
+ # AlphaVantage formats GLOBAL_QUOTE keys like '01. symbol', '05. price'.
+ # clean_quote extracts the human-friendly key (text after the numeric prefix).
+ def clean_quote(q: dict) -> dict:
+ # Return a mapping like {'price': '123.45', 'volume': '123456'}
+ return {k.split(". ")[1]: v for k, v in q.get("Global Quote", {}).items()}
+
+ quote = clean_quote(quote_data)
+ overview = (
+ overview_data
+ if not (isinstance(overview_data, dict) and "error" in overview_data)
+ else {}
+ )
+
+ # If neither quote nor overview has data, return early
+ if not quote and not overview:
+ return f"No profile data found for {symbol}."
+
+ # Helper to format large numbers into human-friendly strings
+ def fmt_num(val):
+ if not val or val == "None":
+ return "-"
+ try:
+ f = float(val)
+ if abs(f) >= 1e9:
+ return f"${f / 1e9:.2f}B"
+ if abs(f) >= 1e6:
+ return f"${f / 1e6:.2f}M"
+ return f"{f:,.2f}"
+ except Exception:
+ return val
+
+ # --- Assemble Markdown profile ---
+ # Header / basic company info
+ name = overview.get("Name", symbol)
+ sector = overview.get("Sector", "-")
+ industry = overview.get("Industry", "-")
+ desc = overview.get("Description", "No description available.")
+ # Truncate long descriptions to save tokens
+ if len(desc) > 300:
+ desc = desc[:300] + "..."
+
+ profile_md = f"### Stock Profile: {name} ({symbol})\n\n"
+ profile_md += f"**Sector**: {sector} | **Industry**: {industry}\n\n"
+ profile_md += f"**Description**: {desc}\n\n"
+
+ # --- Market snapshot table ---
+ # Format price, change, market cap, volume and 52-week range
+ price = fmt_num(quote.get("price"))
+ change_pct = quote.get("change percent", "-")
+ # Choose a simple textual indicator for trend (avoid emoji)
+ trend = "Up" if change_pct and "-" not in change_pct else "Down"
+
+ mkt_cap = fmt_num(overview.get("MarketCapitalization"))
+ vol = fmt_num(quote.get("volume"))
+
+ range_52w = (
+ f"{fmt_num(overview.get('52WeekLow'))} - {fmt_num(overview.get('52WeekHigh'))}"
+ )
+
+ profile_md += "**Market Snapshot**\n"
+ profile_md += "| Price | Change | Market Cap | Volume | 52W Range |\n"
+ profile_md += "|---|---|---|---|---|\n"
+ profile_md += (
+ f"| {price} | {trend} {change_pct} | {mkt_cap} | {vol} | {range_52w} |\n\n"
+ )
+
+ # --- Valuation & Financials ---
+ pe = overview.get("PERatio", "-")
+ peg = overview.get("PEGRatio", "-")
+ eps = overview.get("EPS", "-")
+ div_yield = overview.get("DividendYield", "0")
+ try:
+ div_yield_pct = f"{float(div_yield) * 100:.2f}%"
+ except Exception:
+ div_yield_pct = "-"
+
+ beta = overview.get("Beta", "-")
+ profit_margin = overview.get("ProfitMargin", "-")
+ try:
+ pm_pct = f"{float(profit_margin) * 100:.1f}%"
+ except Exception:
+ pm_pct = "-"
+
+ profile_md += "**Valuation & Financials**\n"
+ profile_md += "| PE Ratio | PEG | EPS | Div Yield | Beta | Profit Margin |\n"
+ profile_md += "|---|---|---|---|---|---|\n"
+ profile_md += f"| {pe} | {peg} | {eps} | {div_yield_pct} | {beta} | {pm_pct} |\n\n"
+
+ # --- Analyst Ratings (if provided) ---
+ target = overview.get("AnalystTargetPrice")
+ buy = overview.get("AnalystRatingBuy", "0")
+ hold = overview.get("AnalystRatingHold", "0")
+ sell = overview.get("AnalystRatingSell", "0")
+
+ if target and target != "None":
+ profile_md += f"**Analyst Consensus**: Target Price ${target} (Buy: {buy}, Hold: {hold}, Sell: {sell})"
+
+ return profile_md
+
+
+async def get_market_sentiment(
+ symbol: str,
+ limit: int = 10,
+ context: Optional[TaskContext] = None,
+) -> str:
+ """
+ Retrieves and summarizes market sentiment and news for a specific stock symbol.
+ Uses AlphaVantage News Sentiment API to get sentiment scores and summaries.
+
+ Args:
+ symbol: Stock ticker (e.g., 'AAPL', 'TSLA').
+ limit: Max number of news items to analyze (default 10).
+ """
+ # 1. Fetch data
+ # Note: 'tickers' param filters news mentioning this symbol
+ data = await _fetch_alpha_vantage(
+ function="NEWS_SENTIMENT",
+ symbol=None,
+ extra_params={"tickers": symbol, "limit": str(limit)},
+ )
+
+    # Surface API-level errors instead of silently reporting "no news"
+    if isinstance(data, dict) and "error" in data:
+        return f"API Error (News): {data['error']}"
+
+    feed = data.get("feed", [])
+    if not feed:
+        return f"No recent news found for {symbol}."
+
+ # 2. Filter and Process News
+ # We only want news where the ticker is RELEVANT (score > 0.1)
+ relevant_news = []
+ total_sentiment_score = 0.0
+ valid_count = 0
+
+ for item in feed:
+ if context:
+ await context.emit_progress(
+ f"Analyzing news: [{item.get('title', '')}]({item.get('url', '')})...",
+ step="processing_news",
+ )
+
+ # Find sentiment for OUR symbol within the list of tickers mentioned in this article
+ ticker_meta = next(
+ (t for t in item.get("ticker_sentiment", []) if t["ticker"] == symbol), None
+ )
+
+ # Fallback: if symbol not explicitly in list (rare), use overall sentiment
+ sentiment_score = (
+ float(ticker_meta["ticker_sentiment_score"])
+ if ticker_meta
+ else item.get("overall_sentiment_score", 0)
+ )
+ relevance = float(ticker_meta["relevance_score"]) if ticker_meta else 0
+
+ # Filter noise: Skip low relevance articles
+ if relevance < 0.1:
+ continue
+
+ valid_count += 1
+ total_sentiment_score += sentiment_score
+
+ # Format date: 20251211T001038 -> 2025-12-11
+ pub_date = item.get("time_published", "")[:8]
+ try:
+ pub_date = datetime.strptime(pub_date, "%Y%m%d").strftime("%Y-%m-%d")
+ except Exception:
+ pass
+
+ relevant_news.append(
+ {
+ "title": item.get("title"),
+ "summary": item.get("summary"),
+ "source": item.get("source"),
+ "date": pub_date,
+ "url": item.get("url"),
+ "sentiment_label": item.get(
+ "overall_sentiment_label"
+ ), # Use overall label for readability
+ "score": sentiment_score,
+ }
+ )
+
+ if not relevant_news:
+ return f"Found news, but none were highly relevant to {symbol}."
+
+ # 3. Calculate Aggregated Sentiment
+ if context:
+ await context.emit_progress(
+ "Calculating aggregate market sentiment...", step="aggregating_sentiment"
+ )
+
+ avg_score = total_sentiment_score / valid_count if valid_count > 0 else 0
+
+ # Map score to label (based on AlphaVantage definition)
+ if avg_score <= -0.15:
+ aggregate_label = "Bearish 🐻"
+ elif avg_score >= 0.15:
+ aggregate_label = "Bullish 🐂"
+ else:
+ aggregate_label = "Neutral 😐"
+
+ # 4. Construct Markdown Output
+ # Header
+ md = f"### Market Sentiment for {symbol}\n"
+ md += f"**Overall Signal**: {aggregate_label} (Avg Score: {avg_score:.2f})\n"
+ md += f"**Analysis Basis**: {len(relevant_news)} relevant articles\n\n"
+
+ # Top News List (Markdown)
+ md += "**Top Relevant News:**\n"
+ for news in relevant_news[:5]: # Show top 5 to save space
+ label_icon = (
+ "🟢"
+ if "Bullish" in news["sentiment_label"]
+ else "🔴"
+ if "Bearish" in news["sentiment_label"]
+ else "⚪"
+ )
+
+ md += f"- **{news['date']}** [{news['source']}]\n"
+ md += f" [{news['title']}]({news['url']})\n"
+ md += f" *Sentiment:* {label_icon} {news['sentiment_label']} | *Summary:* {news['summary'][:150]}...\n\n"
+
+ return md
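+
+
+# Minimal usage sketch (illustrative; assumes AlphaVantage credentials are
+# configured for _fetch_alpha_vantage and a running event loop):
+# md = await get_market_sentiment("AAPL", limit=5)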
diff --git a/python/valuecell/agents/react_agent/tools/research.py b/python/valuecell/agents/react_agent/tools/research.py
new file mode 100644
index 000000000..44645e0ac
--- /dev/null
+++ b/python/valuecell/agents/react_agent/tools/research.py
@@ -0,0 +1,91 @@
+import os
+
+from agno.agent import Agent
+from agno.db.in_memory import InMemoryDb
+from edgar import set_identity
+from loguru import logger
+
+from valuecell.agents.research_agent.prompts import (
+ KNOWLEDGE_AGENT_EXPECTED_OUTPUT,
+ KNOWLEDGE_AGENT_INSTRUCTION,
+)
+from valuecell.agents.research_agent.sources import (
+ fetch_ashare_filings,
+ fetch_event_sec_filings,
+ fetch_periodic_sec_filings,
+)
+
+# from valuecell.utils.env import agent_debug_mode_enabled
+
+research_agent: Agent | None = None
+
+
+def build_research_agent() -> Agent:
+ import valuecell.utils.model as model_utils_mod
+ from valuecell.agents.research_agent.knowledge import get_knowledge
+
+ tools = [
+ fetch_periodic_sec_filings,
+ fetch_event_sec_filings,
+ fetch_ashare_filings,
+ ]
+ # Configure EDGAR identity only when SEC_EMAIL is present
+ sec_email = os.getenv("SEC_EMAIL")
+ if sec_email:
+ set_identity(sec_email)
+ else:
+ logger.warning(
+ "SEC_EMAIL not set; EDGAR identity is not configured for ResearchAgent."
+ )
+
+ # Lazily obtain knowledge; disable search if unavailable
+ knowledge = get_knowledge()
+ return Agent(
+ model=model_utils_mod.get_model_for_agent("research_agent"),
+ instructions=[KNOWLEDGE_AGENT_INSTRUCTION],
+ expected_output=KNOWLEDGE_AGENT_EXPECTED_OUTPUT,
+ tools=tools,
+ knowledge=knowledge,
+ db=InMemoryDb(),
+ # context
+ search_knowledge=knowledge is not None,
+ add_datetime_to_context=True,
+ # configuration
+ # debug_mode=agent_debug_mode_enabled(),
+ )
+
+
+def get_research_agent() -> Agent:
+ """Lazily create and cache the ResearchAgent instance."""
+ global research_agent
+ if research_agent is None:
+ research_agent = build_research_agent()
+ return research_agent
+
+
+async def research(query: str) -> str:
+ """
+ Perform asynchronous research using the cached ResearchAgent.
+
+ The ResearchAgent is configured with filing-retrieval tools (periodic
+ and event-driven SEC filings plus A-share filings fetchers), an
+ optional knowledge source, and an in-memory DB for short-lived
+ context. The agent may call multiple tools internally and composes
+ their outputs into a single human-readable string.
+
+ The returned value is the agent's aggregated textual answer. Callers
+ should treat the response as plain text suitable for display or further
+ downstream natural-language processing.
+
+ :param query: The natural-language research query or prompt to submit to
+ the ResearchAgent (for example, "Summarize recent SEC filings for
+ AAPL").
+ :type query: str
+ :return: A string containing the agent's aggregated research result.
+ :rtype: str
+ :raises RuntimeError: If the underlying agent fails or returns an
+ unexpected error while executing the query.
+ """
+ agent = get_research_agent()
+ result = await agent.arun(query)
+ return result.content
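+
+
+# Minimal usage sketch (illustrative; assumes SEC_EMAIL and model credentials
+# are configured):
+# import asyncio
+# answer = asyncio.run(research("Summarize recent SEC filings for AAPL"))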
diff --git a/python/valuecell/core/coordinate/orchestrator.py b/python/valuecell/core/coordinate/orchestrator.py
index 7f8fc1e9c..8bcd0a318 100644
--- a/python/valuecell/core/coordinate/orchestrator.py
+++ b/python/valuecell/core/coordinate/orchestrator.py
@@ -1,6 +1,7 @@
import asyncio
-from typing import AsyncGenerator, Dict, Optional
+from typing import Any, AsyncGenerator, Dict, List, Optional
+from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from loguru import logger
from valuecell.core.constants import ORIGINAL_USER_INPUT, PLANNING_TASK
@@ -8,17 +9,14 @@
from valuecell.core.event import EventResponseService
from valuecell.core.plan import PlanService
from valuecell.core.plan.models import ExecutionPlan
-from valuecell.core.super_agent import (
- SuperAgentDecision,
- SuperAgentOutcome,
- SuperAgentService,
-)
+from valuecell.core.super_agent import SuperAgentService
from valuecell.core.task import TaskExecutor
from valuecell.core.types import (
BaseResponse,
StreamResponseEvent,
UserInput,
)
+from valuecell.utils import i18n_utils
from valuecell.utils.uuid import generate_task_id, generate_thread_id, generate_uuid
from .services import AgentServiceBundle
@@ -95,6 +93,387 @@ def __init__(
# ==================== Public API Methods ====================
+ async def stream_react_agent(
+ self, user_input: UserInput, _response_thread_id: str
+ ) -> AsyncGenerator[BaseResponse, None]:
+ """
+ Stream React Agent (LangGraph) execution as standardized protocol events.
+
+ This function orchestrates the React Agent's multi-node graph execution
+ and converts internal LangGraph events into the frontend event protocol.
+
+ Event Mappings:
+ - Planner output -> REASONING (started/content/completed)
+ - Executor tasks -> REASONING, correlated per task via item_id
+ - Critic feedback -> REASONING (started/content/completed)
+ - Summarizer/Inquirer text -> MESSAGE_CHUNK
+
+ Args:
+ user_input: User input containing query and conversation context
+ _response_thread_id: Thread id used when emitting protocol events
+
+ Yields:
+ BaseResponse: Standardized protocol events for frontend consumption
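+
+ Example (illustrative; ``orchestrator`` is an instance of this class):
+ async for resp in orchestrator.stream_react_agent(user_input, thread_id):
+ dispatch_to_frontend(resp)  # hypothetical consumer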
+ """
+ from valuecell.agents.react_agent.graph import get_app
+
+ conversation_id = user_input.meta.conversation_id
+
+ # ID Mapping:
+ # - LangGraph thread_id = conversation_id (for persistence)
+ # - EventService thread_id = _response_thread_id from the caller (transient stream session)
+ graph_thread_id = conversation_id
+ root_task_id = generate_task_id()
+
+ logger.info(
+ "stream_react_agent: starting React Agent stream for conversation {}",
+ conversation_id,
+ )
+
+ graph = get_app()
+ user_context = {
+ "language": i18n_utils.get_current_language(),
+ "timezone": i18n_utils.get_current_timezone(),
+ }
+
+ # Check if LangGraph checkpoint exists for this conversation.
+ # MemorySaver is in-memory only, so checkpoint exists only within the same
+ # application session. After restart, we need to rebuild history from database.
+ checkpoint_exists = await self._check_langgraph_checkpoint(
+ graph, graph_thread_id
+ )
+
+ if checkpoint_exists:
+ # Checkpoint exists: only pass the new user message.
+ # LangGraph will restore previous state and append the new message via operator.add
+ messages = [HumanMessage(content=user_input.query)]
+ logger.info(
+ "stream_react_agent: checkpoint exists for conversation {}, using new message only",
+ conversation_id,
+ )
+ else:
+ # No checkpoint: rebuild full message history from database.
+ # This happens after application restart when MemorySaver is empty.
+ messages = await self._load_conversation_messages(conversation_id)
+ logger.info(
+ "stream_react_agent: no checkpoint for conversation {}, loaded {} messages from database",
+ conversation_id,
+ len(messages),
+ )
+
+ inputs = {
+ "messages": messages,
+ "user_context": user_context,
+ }
+ config = {"configurable": {"thread_id": graph_thread_id}}
+
+ # Note: executor events are correlated by the task id carried in the
+ # executor's input/output; no STARTED->COMPLETED mapping is stored here.
+
+ def is_real_node_output(d: dict) -> bool:
+ """Filter out router string outputs (e.g., 'wait', 'plan')."""
+ output = d.get("output")
+ return not isinstance(output, str)
+
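+ # Illustrative v2 event shape (only the keys consumed below; payloads
+ # vary by event type and node):
+ # {
+ #     "event": "on_chain_end",
+ #     "metadata": {"langgraph_node": "planner"},
+ #     "data": {"output": {"plan": [...], "strategy_update": "..."}},
+ # }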
+ try:
+ async for event in graph.astream_events(
+ inputs, config=config, version="v2"
+ ):
+ kind = event.get("event", "")
+ node = event.get("metadata", {}).get("langgraph_node", "")
+ data = event.get("data") or {}
+ logger.debug(f"stream_react_agent: event received: {event}")
+
+ # =================================================================
+ # 1. PLANNER -> REASONING
+ # =================================================================
+ if kind == "on_chain_end" and node == "planner":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ if isinstance(output, dict) and "plan" in output:
+ plan = output.get("plan", [])
+ reasoning = output.get("strategy_update") or "..."
+
+ # Generate stable item_id for this planner reasoning block
+ planner_item_id = generate_task_id()
+
+ # REASONING_STARTED
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING_STARTED,
+ item_id=planner_item_id,
+ content=None,
+ agent_name="Planner",
+ )
+ )
+
+ # Format plan as markdown
+ plan_md = f"\n\n**📅 Plan Updated:**\n*{reasoning}*\n"
+ for task in plan:
+ desc = task.get("description", "No description")
+ plan_md += f"- {desc}\n"
+
+ # REASONING (content)
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING,
+ item_id=planner_item_id,
+ content=plan_md,
+ agent_name="Planner",
+ )
+ )
+
+ # REASONING_COMPLETED
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING_COMPLETED,
+ item_id=planner_item_id,
+ content=None,
+ agent_name="Planner",
+ )
+ )
+
+ # =================================================================
+ # 2. EXECUTOR -> REASONING (STARTED & COMPLETED per task)
+ # =================================================================
+
+ # ---------------------------------------------------------
+ # Case A: Executor STARTED
+ # ---------------------------------------------------------
+ elif kind == "on_chain_start" and node == "executor":
+ task_data = data.get("input", {}).get("task", {})
+ task_id = task_data.get("id")
+ raw_tool_name = task_data.get("tool_name", "unknown_tool")
+ task_description = task_data.get("description", "")
+
+ if task_id:
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING_STARTED,
+ item_id=task_id,
+ content=None,
+ agent_name="Executor",
+ )
+ )
+
+ title_text = f"**Executing Task:** {task_description} (`{raw_tool_name}`)\n\n"
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING,
+ item_id=task_id,
+ content=title_text,
+ agent_name="Executor",
+ )
+ )
+
+ # ---------------------------------------------------------
+ # Case B: Intermediate Progress (Tool Events)
+ # ---------------------------------------------------------
+ elif kind == "on_custom_event" and event.get("name") == "tool_event":
+ payload = data
+ if payload.get("type") == "progress":
+ progress_task_id = payload.get("task_id")
+ msg = payload.get("msg", "")
+ step = payload.get("step")
+
+ # Format progress message
+ progress_parts = []
+ if step:
+ progress_parts.append(f"[{step}]")
+ progress_parts.append(msg)
+ progress_text = f"> {' '.join(progress_parts)}\n"
+
+ if progress_task_id:
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING,
+ item_id=progress_task_id,
+ content=progress_text,
+ agent_name="Executor",
+ )
+ )
+
+ # ---------------------------------------------------------
+ # Case C: Executor COMPLETED
+ # ---------------------------------------------------------
+ elif kind == "on_chain_end" and node == "executor":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ if isinstance(output, dict) and "completed_tasks" in output:
+ for task_id_key, res in output["completed_tasks"].items():
+ # Extract result
+ if isinstance(res, dict):
+ raw_result = res.get("result") or str(res)
+ else:
+ raw_result = str(res)
+
+ # Render the full result (no truncation is applied)
+ result_preview = str(raw_result)
+ final_text = f"\n✅ **Result:**\n\n{result_preview}\n"
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING,
+ item_id=task_id_key,
+ content=final_text,
+ agent_name="Executor",
+ )
+ )
+
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING_COMPLETED,
+ item_id=task_id_key,
+ content=None,
+ agent_name="Executor",
+ )
+ )
+
+ # =================================================================
+ # 3. CRITIC -> REASONING
+ # =================================================================
+ elif kind == "on_chain_end" and node == "critic":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ if isinstance(output, dict):
+ summary = output.get("_critic_summary")
+ if summary:
+ approved = summary.get("approved", False)
+ icon = "✅" if approved else "🚧"
+ reason = summary.get("reason") or summary.get(
+ "feedback", ""
+ )
+
+ # Generate stable item_id for this critic reasoning block
+ critic_item_id = generate_task_id()
+
+ # REASONING_STARTED
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING_STARTED,
+ item_id=critic_item_id,
+ content=None,
+ agent_name="Critic",
+ )
+ )
+
+ critic_md = (
+ f"\n\n**{icon} Critic Decision:** {reason}\n\n"
+ )
+
+ # REASONING (content)
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING,
+ item_id=critic_item_id,
+ content=critic_md,
+ agent_name="Critic",
+ )
+ )
+
+ # REASONING_COMPLETED
+ yield await self.event_service.emit(
+ self.event_service.factory.reasoning(
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ event=StreamResponseEvent.REASONING_COMPLETED,
+ item_id=critic_item_id,
+ content=None,
+ agent_name="Critic",
+ )
+ )
+
+ # =================================================================
+ # 4. SUMMARIZER / INQUIRER -> MESSAGE_CHUNK
+ # =================================================================
+
+ # Summarizer: Streaming content
+ elif kind == "on_chat_model_stream" and node == "summarizer":
+ chunk = data.get("chunk")
+ text = chunk.content if chunk else None
+ if text:
+ yield await self.event_service.emit(
+ self.event_service.factory.message_response_general(
+ event=StreamResponseEvent.MESSAGE_CHUNK,
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ content=text,
+ agent_name="Summarizer",
+ )
+ )
+
+ # Inquirer: Static content (full message at end)
+ elif kind == "on_chain_end" and node == "inquirer":
+ if is_real_node_output(data):
+ output = data.get("output", {})
+ msgs = output.get("messages", [])
+ if msgs and isinstance(msgs, list):
+ last_msg = msgs[-1]
+ if isinstance(last_msg, AIMessage) and last_msg.content:
+ yield await self.event_service.emit(
+ self.event_service.factory.message_response_general(
+ event=StreamResponseEvent.MESSAGE_CHUNK,
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ content=last_msg.content,
+ agent_name="Inquirer",
+ )
+ )
+
+ logger.info(
+ "stream_react_agent: completed React Agent stream for conversation {}",
+ conversation_id,
+ )
+
+ except Exception as exc:
+ logger.exception(
+ f"stream_react_agent: execution failed for conversation {conversation_id}: {exc}"
+ )
+ # Emit error message
+ yield await self.event_service.emit(
+ self.event_service.factory.message_response_general(
+ event=StreamResponseEvent.MESSAGE_CHUNK,
+ conversation_id=conversation_id,
+ thread_id=_response_thread_id,
+ task_id=root_task_id,
+ content=f"⚠️ System Error: {str(exc)}",
+ agent_name="System",
+ )
+ )
+
+ # ==================== Public API Methods ====================
+
async def process_user_input(
self, user_input: UserInput
) -> AsyncGenerator[BaseResponse, None]:
@@ -308,76 +687,10 @@ async def _handle_new_request(
# 1) Super Agent triage phase (pre-planning) - skip if target agent is specified
if user_input.target_agent_name == self.super_agent_service.name:
- # Emit reasoning_started before streaming reasoning content
- sa_task_id = generate_task_id()
- sa_reasoning_item_id = generate_uuid("reasoning")
- yield await self.event_service.emit(
- self.event_service.factory.reasoning(
- conversation_id,
- thread_id,
- task_id=sa_task_id,
- event=StreamResponseEvent.REASONING_STARTED,
- agent_name=self.super_agent_service.name,
- item_id=sa_reasoning_item_id,
- ),
- )
-
- # Stream reasoning content and collect final outcome
- super_outcome: SuperAgentOutcome | None = None
- async for item in self.super_agent_service.run(user_input):
- if isinstance(item, str):
- # Yield reasoning chunk
- yield await self.event_service.emit(
- self.event_service.factory.reasoning(
- conversation_id,
- thread_id,
- task_id=sa_task_id,
- event=StreamResponseEvent.REASONING,
- content=item,
- agent_name=self.super_agent_service.name,
- item_id=sa_reasoning_item_id,
- ),
- )
- else:
- # SuperAgentOutcome received
- super_outcome = item
-
- # Emit reasoning_completed
- yield await self.event_service.emit(
- self.event_service.factory.reasoning(
- conversation_id,
- thread_id,
- task_id=sa_task_id,
- event=StreamResponseEvent.REASONING_COMPLETED,
- agent_name=self.super_agent_service.name,
- item_id=sa_reasoning_item_id,
- ),
- )
-
- # Fallback if no outcome was received
- if super_outcome is None:
- super_outcome = SuperAgentOutcome(
- decision=SuperAgentDecision.HANDOFF_TO_PLANNER,
- enriched_query=user_input.query,
- reason="No outcome received from SuperAgent",
- )
-
- if super_outcome.answer_content:
- ans = self.event_service.factory.message_response_general(
- StreamResponseEvent.MESSAGE_CHUNK,
- conversation_id,
- thread_id,
- task_id=generate_task_id(),
- content=super_outcome.answer_content,
- agent_name=self.super_agent_service.name,
- )
- yield await self.event_service.emit(ans)
- if super_outcome.decision == SuperAgentDecision.ANSWER:
- return
+ async for response in self.stream_react_agent(user_input, thread_id):
+ yield response
- if super_outcome.decision == SuperAgentDecision.HANDOFF_TO_PLANNER:
- user_input.target_agent_name = ""
- user_input.query = super_outcome.enriched_query
+ return
# 2) Planner phase (existing logic)
# Create planning task with user input callback
@@ -509,6 +822,132 @@ async def _monitor_planning_task(
async for response in self.task_executor.execute_plan(plan, thread_id):
yield response
+ async def _check_langgraph_checkpoint(self, graph: Any, thread_id: str) -> bool:
+ """Check if a LangGraph checkpoint exists for the given thread_id.
+
+ Args:
+ graph: The compiled LangGraph app
+ thread_id: The thread_id to check
+
+ Returns:
+ True if checkpoint exists, False otherwise
+ """
+ try:
+ # Access the checkpointer and try to get the latest checkpoint
+ checkpointer = graph.checkpointer
+ if not checkpointer:
+ return False
+
+ # Get the latest checkpoint for this thread
+ config = {"configurable": {"thread_id": thread_id}}
+ checkpoint = await checkpointer.aget(config)
+
+ # A non-None checkpoint means prior state exists for this thread
+ return checkpoint is not None
+ except Exception as exc:
+ logger.warning(
+ "Failed to check LangGraph checkpoint for thread {}: {}",
+ thread_id,
+ exc,
+ )
+ # On error, assume no checkpoint exists (safe fallback: load from DB)
+ return False
+
+ async def _load_conversation_messages(
+ self, conversation_id: str
+ ) -> List[BaseMessage]:
+ """Load historical messages from database.
+
+ Since LangGraph's MemorySaver is in-memory only, we need to reconstruct
+ message history from the database when the application restarts.
+
+ Args:
+ conversation_id: The conversation ID to load messages for
+
+ Returns:
+ List of BaseMessage objects (HumanMessage, AIMessage) with current query appended
+ """
+ from valuecell.core.types import Role
+
+ try:
+ # Fetch all conversation items
+ items = await self.conversation_service.get_conversation_items(
+ conversation_id=conversation_id
+ )
+
+ messages: List[BaseMessage] = []
+
+ # Convert stored items to LangChain messages.
+ # Strategy:
+ # - USER messages: Load from THREAD_STARTED events (user queries)
+ # - AGENT messages: Load only from Summarizer's MESSAGE_CHUNK events
+ # (final conversational responses, not Planner/Critic/Executor reasoning)
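+ #
+ # Illustrative mapping (hypothetical stored items):
+ #   USER "What moved AAPL today?"          -> HumanMessage
+ #   AGENT MESSAGE_CHUNK from "Summarizer"  -> AIMessage
+ #   AGENT REASONING from "Planner"         -> skipped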
+
+ def _extract_content(payload) -> str | None:
+ """Normalize payload into a plain text content string.
+
+ Handles cases where `payload` is:
+ - a JSON string (extract `content` or `text`)
+ - an object with `.content` attribute
+ - None
+ - anything else (stringify as fallback)
+ """
+ if isinstance(payload, str):
+ try:
+ import json
+
+ parsed = json.loads(payload)
+ if isinstance(parsed, dict):
+ return parsed.get("content") or parsed.get("text")
+ return str(parsed)
+ except Exception:
+ return payload
+
+ if payload is None:
+ return None
+
+ if hasattr(payload, "content"):
+ try:
+ return payload.content
+ except Exception:
+ return None
+
+ try:
+ return str(payload)
+ except Exception:
+ return None
+
+ for item in items:
+ if item.role == Role.USER:
+ content = _extract_content(item.payload)
+ if content:
+ messages.append(HumanMessage(content=content))
+
+ elif item.role == Role.AGENT:
+ if (
+ item.event == StreamResponseEvent.MESSAGE_CHUNK.value
+ and item.agent_name in {"Summarizer", "Inquirer"}
+ ):
+ content = _extract_content(item.payload)
+ if content:
+ messages.append(AIMessage(content=content))
+
+ logger.info(
+ "Loaded {} historical messages for conversation {}",
+ len(messages),
+ conversation_id,
+ )
+
+ except Exception as exc:
+ logger.warning(
+ "Failed to load conversation history for {}: {}. Starting with empty history.",
+ conversation_id,
+ exc,
+ )
+ messages = []
+
+ return messages
+
def _validate_execution_context(
self, context: ExecutionContext, user_id: str
) -> bool:
diff --git a/python/valuecell/core/event/buffer.py b/python/valuecell/core/event/buffer.py
index 28c295eb0..50d9d295f 100644
--- a/python/valuecell/core/event/buffer.py
+++ b/python/valuecell/core/event/buffer.py
@@ -32,8 +32,10 @@ class SaveItem:
metadata: Optional[ResponseMetadata] = None
-# conversation_id, thread_id, task_id, event
-BufferKey = Tuple[str, Optional[str], Optional[str], object]
+# conversation_id, thread_id, task_id, event, optional item_id (for REASONING), agent_name
+BufferKey = Tuple[
+ str, Optional[str], Optional[str], object, Optional[str], Optional[str]
+]
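+# Example key (illustrative): a REASONING chunk from the Executor for item "item-42":
+# ("conv-1", "thread-1", "task-1", StreamResponseEvent.REASONING, "item-42", "Executor")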
class BufferEntry:
@@ -114,26 +116,34 @@ def annotate(self, resp: BaseResponse) -> BaseResponse:
For REASONING events, if the caller has already set an item_id, it is
preserved to allow correlation of reasoning_started/reasoning/reasoning_completed.
- MESSAGE_CHUNK events always use the buffer to get a stable paragraph item_id.
+ The item_id is also included in the BufferKey to separate parallel tasks.
+ MESSAGE_CHUNK events use agent_name in the BufferKey to separate messages
+ from different agents (Planner, Critic, Summarizer, etc.).
"""
data: UnifiedResponseData = resp.data
ev = resp.event
if ev in self._buffered_events:
- # For REASONING events, trust the caller's item_id (set by orchestrator)
- # and skip buffer-based id assignment. MESSAGE_CHUNK always uses buffer.
- # TODO: consider when no item_id is set for REASONING, especially in remote agent calls
+ # For REASONING events with caller-provided item_id, include it in the key
+ # to ensure parallel tasks have separate buffers
+ buffer_item_id = None
if ev == StreamResponseEvent.REASONING and data.item_id:
- return resp
+ buffer_item_id = data.item_id
+
key: BufferKey = (
data.conversation_id,
data.thread_id,
data.task_id,
ev,
+ buffer_item_id,
+ data.agent_name, # Include agent_name to separate different message sources
)
entry = self._buffers.get(key)
if not entry:
- # Start a new paragraph buffer with a fresh paragraph item_id
- entry = BufferEntry(role=data.role, agent_name=data.agent_name)
+ # Start a new paragraph buffer with the caller's item_id (or a
+ # fresh one when item_id is None)
+ entry = BufferEntry(
+ item_id=buffer_item_id, role=data.role, agent_name=data.agent_name
+ )
self._buffers[key] = entry
if entry.agent_name is None and data.agent_name:
entry.agent_name = data.agent_name
@@ -173,13 +183,21 @@ def ingest(self, resp: BaseResponse) -> List[SaveItem]:
out.append(self._make_save_item_from_response(resp))
return out
- # Buffered: accumulate by (ctx + event)
+ # Buffered: accumulate by (ctx + event + optional item_id + agent_name)
if ev in self._buffered_events:
- key: BufferKey = (*ctx, ev)
+ # For REASONING events with item_id, include it in key to separate parallel tasks
+ buffer_item_id = None
+ if ev == StreamResponseEvent.REASONING and data.item_id:
+ buffer_item_id = data.item_id
+
+ key: BufferKey = (*ctx, ev, buffer_item_id, data.agent_name)
entry = self._buffers.get(key)
if not entry:
# If annotate() wasn't called, create an entry now.
- entry = BufferEntry(role=data.role, agent_name=data.agent_name)
+ entry = BufferEntry(
+ item_id=buffer_item_id, role=data.role, agent_name=data.agent_name
+ )
self._buffers[key] = entry
elif entry.agent_name is None and data.agent_name:
entry.agent_name = data.agent_name
@@ -225,7 +243,7 @@ def _collect_task_keys(
) -> List[BufferKey]:
keys: List[BufferKey] = []
for key in list(self._buffers.keys()):
- k_conv, k_thread, k_task, k_event = key
+ k_conv, k_thread, k_task, k_event, k_item_id, k_agent_name = key
if (
k_conv == conversation_id
and (thread_id is None or k_thread == thread_id)
@@ -246,7 +264,7 @@ def _finalize_keys(self, keys: List[BufferKey]) -> List[SaveItem]:
out.append(
SaveItem(
item_id=entry.item_id,
- event=key[3],
+ event=key[3], # event is at index 3
conversation_id=key[0],
thread_id=key[1],
task_id=key[2],