From 24bf2bc07af36a21967b52f487bf57c9cb0cea38 Mon Sep 17 00:00:00 2001
From: Harish Kotra
Date: Thu, 9 Jan 2025 11:01:32 +0530
Subject: [PATCH] Added a new page for node operators and restructured the
 navigation to merge the install and uninstall pages

---
 .../agent-frameworks-and-apps/index.html | 4 +-
 build/1.0.0/category/creator-guide/index.html | 4 +-
 .../category/domain-operator-guide/index.html | 4 +-
 .../index.html | 4 +-
 build/1.0.0/category/how-do-i-/index.html | 4 +-
 .../1.0.0/category/knowledge-bases/index.html | 4 +-
 .../category/node-operator-guide/index.html | 4 +-
 build/1.0.0/category/tutorial/index.html | 4 +-
 build/1.0.0/category/user-guide/index.html | 4 +-
 .../creator-guide/finetune/intro/index.html | 4 +-
 .../finetune/llamacpp/index.html | 4 +-
 .../knowledge/concepts/index.html | 4 +-
 .../creator-guide/knowledge/csv/index.html | 4 +-
 .../knowledge/firecrawl/index.html | 4 +-
 .../knowledge/markdown/index.html | 4 +-
 .../creator-guide/knowledge/pdf/index.html | 4 +-
 .../creator-guide/knowledge/text/index.html | 4 +-
 .../knowledge/web-tool/index.html | 4 +-
 .../1.0.0/domain-guide/quick-start/index.html | 4 +-
 build/1.0.0/intro/index.html | 4 +-
 build/1.0.0/litepaper/index.html | 4 +-
 build/1.0.0/node-guide/cli-options/index.html | 4 +-
 build/1.0.0/node-guide/customize/index.html | 4 +-
 .../node-guide/install_uninstall/index.html | 4 +-
 build/1.0.0/node-guide/quick-start/index.html | 4 +-
 build/1.0.0/node-guide/register/index.html | 4 +-
 .../node-guide/system-requirements/index.html | 4 +-
 build/1.0.0/node-guide/tasks/aws/index.html | 4 +-
 build/1.0.0/node-guide/tasks/cuda/index.html | 4 +-
 .../1.0.0/node-guide/tasks/docker/index.html | 4 +-
 build/1.0.0/node-guide/tasks/local/index.html | 4 +-
 .../node-guide/tasks/multiple/index.html | 4 +-
 .../1.0.0/node-guide/tasks/protect/index.html | 4 +-
 .../node-guide/troubleshooting/index.html | 4 +-
 build/1.0.0/search-index.json | 2 +-
 build/1.0.0/tutorial/coinbase/index.html | 4 +-
 build/1.0.0/tutorial/eliza/index.html | 4 +-
 build/1.0.0/tutorial/tool-call/index.html | 4 +-
 .../tutorial/translator-agent/index.html | 4 +-
 .../1.0.0/user-guide/api-reference/index.html | 4 +-
 .../user-guide/apps/agent-zero/index.html | 4 +-
 .../user-guide/apps/anything_llm/index.html | 4 +-
 .../1.0.0/user-guide/apps/codegpt/index.html | 4 +-
 .../1.0.0/user-guide/apps/continue/index.html | 4 +-
 build/1.0.0/user-guide/apps/cursor/index.html | 4 +-
 build/1.0.0/user-guide/apps/dify/index.html | 4 +-
 .../apps/flowiseai-tool-call/index.html | 4 +-
 .../user-guide/apps/flowiseai/index.html | 4 +-
 .../user-guide/apps/gpt-planner/index.html | 4 +-
 build/1.0.0/user-guide/apps/intro/index.html | 4 +-
 .../user-guide/apps/llamacoder/index.html | 4 +-
 .../user-guide/apps/llamaedgebook/index.html | 4 +-
 .../user-guide/apps/llamaparse/index.html | 4 +-
 .../user-guide/apps/llamatutor/index.html | 4 +-
 .../1.0.0/user-guide/apps/lobechat/index.html | 4 +-
 .../1.0.0/user-guide/apps/obsidian/index.html | 4 +-
 .../user-guide/apps/openwebui/index.html | 4 +-
 .../1.0.0/user-guide/apps/stockbot/index.html | 4 +-
 .../apps/translation-agent/index.html | 4 +-
 build/1.0.0/user-guide/apps/zed/index.html | 4 +-
 build/1.0.0/user-guide/mynode/index.html | 4 +-
 build/1.0.0/user-guide/nodes/index.html | 4 +-
 build/404.html | 4 +-
 .../agent-integrations/agent-zero/index.html | 6 +--
 .../anything_llm/index.html | 6 +--
 build/agent-integrations/codegpt/index.html | 6 +--
 build/agent-integrations/continue/index.html | 6 +--
 build/agent-integrations/cursor/index.html | 6 +--
 build/agent-integrations/dify/index.html | 6 +--
 .../flowiseai-tool-call/index.html | 6 +--
 build/agent-integrations/flowiseai/index.html | 6 +--
 .../agent-integrations/gpt-planner/index.html | 6 +--
 build/agent-integrations/index.html | 6 +--
 build/agent-integrations/intro/index.html | 6 +--
 .../agent-integrations/llamacoder/index.html | 6 +--
 .../llamaedgebook/index.html | 6 +--
 .../agent-integrations/llamaparse/index.html | 6 +--
 .../agent-integrations/llamatutor/index.html | 6 +--
 build/agent-integrations/lobechat/index.html | 6 +--
 build/agent-integrations/obsidian/index.html | 6 +--
 build/agent-integrations/openwebui/index.html | 6 +--
 build/agent-integrations/stockbot/index.html | 6 +--
 .../translation-agent/index.html | 6 +--
 build/agent-integrations/zed/index.html | 6 +--
 build/assets/js/0e384e19.10d71f54.js | 1 -
 build/assets/js/0e384e19.a778aabe.js | 1 +
 build/assets/js/22dd74f7.0991a1e6.js | 1 +
 build/assets/js/22dd74f7.2a9f9a5e.js | 1 -
 build/assets/js/2a2a0c40.93de8b12.js | 1 +
 build/assets/js/2a2a0c40.e8fec0bf.js | 1 -
 build/assets/js/69e61f52.29367845.js | 1 -
 build/assets/js/69e61f52.4b0d5810.js | 1 +
 build/assets/js/a4285c2a.9b02c89e.js | 1 -
 build/assets/js/d737b437.5d58a880.js | 1 -
 build/assets/js/d737b437.c3aa8eba.js | 1 +
 build/assets/js/d7c32174.14ef84f4.js | 1 +
 build/assets/js/d7c32174.866dcd49.js | 1 -
 build/assets/js/ffb8877b.85ee165a.js | 1 +
 build/assets/js/ffb8877b.9f08552a.js | 1 -
 build/assets/js/main.8cf9f2f7.js | 2 -
 build/assets/js/main.e9b7aa74.js | 2 +
 ...CENSE.txt => main.e9b7aa74.js.LICENSE.txt} | 0
 build/assets/js/runtime~main.85113ed9.js | 1 +
 build/assets/js/runtime~main.a303bb81.js | 1 -
 .../advanced-deployment-options/index.html | 6 +--
 build/domain-guide/quick-start/index.html | 6 +--
 build/domain-operator-guide/index.html | 6 +--
 build/faqs/index.html | 6 +--
 .../aws/index.html | 6 +--
 .../cuda/index.html | 6 +--
 .../docker/index.html | 6 +--
 .../local/index.html | 6 +--
 .../multiple/index.html | 6 +--
 .../protect/index.html | 6 +--
 .../getting-started/api-reference/index.html | 6 +--
 .../getting-started/authentication/index.html | 6 +--
 build/getting-started/cli-options/index.html | 6 +--
 build/getting-started/customize/index.html | 6 +--
 build/getting-started/index.html | 8 +--
 build/getting-started/install/index.html | 18 +++++-
 build/getting-started/mynode/index.html | 6 +--
 build/getting-started/quick-start/index.html | 6 +--
 build/getting-started/register/index.html | 6 +--
 .../system-requirements/index.html | 8 +--
 .../troubleshooting/index.html | 8 +--
 build/getting-started/uninstall/index.html | 37 -------------
 .../getting-started/what-is-a-node/index.html | 8 +--
 build/guides-and-tutorials/index.html | 6 +--
 build/index.html | 4 +-
 build/intro/index.html | 8 +--
 build/knowledge-bases/how-to/csv/index.html | 6 +--
 .../how-to/firecrawl/index.html | 6 +--
 build/knowledge-bases/how-to/index.html | 6 +--
 .../how-to/markdown/index.html | 6 +--
 build/knowledge-bases/how-to/pdf/index.html | 6 +--
 build/knowledge-bases/how-to/text/index.html | 6 +--
 build/knowledge-bases/index.html | 6 +--
 build/knowledge-bases/intro/index.html | 6 +--
 build/knowledge-bases/web-tool/index.html | 6 +--
 build/litepaper/index.html | 6 +--
 build/markdown-page/index.html | 4 +-
 build/nodes/index.html | 6 +--
 build/search-index.json | 2 +-
 build/search/index.html | 4 +-
 build/sitemap.xml | 2 +-
 build/tutorial/coinbase/index.html | 6 +--
 build/tutorial/concepts/index.html | 6 +--
 build/tutorial/eliza/index.html | 6 +--
 build/tutorial/llamacpp/index.html | 6 +--
 .../prompt-engineering-tool/index.html | 6 +--
 build/tutorial/tool-call/index.html | 6 +--
 build/tutorial/translator-agent/index.html | 6 +--
 docs/getting-started/_category_.json | 2 +-
 docs/getting-started/install.md | 18 ++++++-
 docs/getting-started/uninstall.md | 31 -----------
 docs/node-operators.md | 52 +++++++++++++++++++
 src/css/custom.css | 2 +-
 157 files changed, 418 insertions(+), 411 deletions(-)
 delete mode 100644 build/assets/js/0e384e19.10d71f54.js
 create mode 100644 build/assets/js/0e384e19.a778aabe.js
 create mode 100644 build/assets/js/22dd74f7.0991a1e6.js
 delete mode 100644 build/assets/js/22dd74f7.2a9f9a5e.js
 create mode 100644 build/assets/js/2a2a0c40.93de8b12.js
 delete mode 100644 build/assets/js/2a2a0c40.e8fec0bf.js
 delete mode 100644 build/assets/js/69e61f52.29367845.js
 create mode 100644 build/assets/js/69e61f52.4b0d5810.js
 delete mode 100644 build/assets/js/a4285c2a.9b02c89e.js
 delete mode 100644 build/assets/js/d737b437.5d58a880.js
 create mode 100644 build/assets/js/d737b437.c3aa8eba.js
 create mode 100644 build/assets/js/d7c32174.14ef84f4.js
 delete mode 100644 build/assets/js/d7c32174.866dcd49.js
 create mode 100644 build/assets/js/ffb8877b.85ee165a.js
 delete mode 100644 build/assets/js/ffb8877b.9f08552a.js
 delete mode 100644 build/assets/js/main.8cf9f2f7.js
 create mode 100644 build/assets/js/main.e9b7aa74.js
 rename build/assets/js/{main.8cf9f2f7.js.LICENSE.txt => main.e9b7aa74.js.LICENSE.txt} (100%)
 create mode 100644 build/assets/js/runtime~main.85113ed9.js
 delete mode 100644 build/assets/js/runtime~main.a303bb81.js
 delete mode 100644 build/getting-started/uninstall/index.html
 delete mode 100644 docs/getting-started/uninstall.md
 create mode 100644 docs/node-operators.md
diff --git a/build/1.0.0/category/agent-frameworks-and-apps/index.html b/build/1.0.0/category/agent-frameworks-and-apps/index.html index 3e35863..dae5a7d 100644 --- a/build/1.0.0/category/agent-frameworks-and-apps/index.html +++ b/build/1.0.0/category/agent-frameworks-and-apps/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/creator-guide/index.html b/build/1.0.0/category/creator-guide/index.html index 907af45..570ad35 100644 --- a/build/1.0.0/category/creator-guide/index.html +++ b/build/1.0.0/category/creator-guide/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/domain-operator-guide/index.html b/build/1.0.0/category/domain-operator-guide/index.html index 88d8aac..173464f 100644 --- a/build/1.0.0/category/domain-operator-guide/index.html +++ b/build/1.0.0/category/domain-operator-guide/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/gaianet-node-with-finetuned-llms/index.html b/build/1.0.0/category/gaianet-node-with-finetuned-llms/index.html index 244af70..3b9be1d 100644 --- a/build/1.0.0/category/gaianet-node-with-finetuned-llms/index.html +++ b/build/1.0.0/category/gaianet-node-with-finetuned-llms/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/how-do-i-/index.html b/build/1.0.0/category/how-do-i-/index.html index 188b210..0e7a2e5 100644 --- a/build/1.0.0/category/how-do-i-/index.html +++ b/build/1.0.0/category/how-do-i-/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/knowledge-bases/index.html b/build/1.0.0/category/knowledge-bases/index.html index b6e3a57..ff503d3 100644 --- a/build/1.0.0/category/knowledge-bases/index.html +++ b/build/1.0.0/category/knowledge-bases/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/node-operator-guide/index.html
b/build/1.0.0/category/node-operator-guide/index.html index 7586513..e305eb2 100644 --- a/build/1.0.0/category/node-operator-guide/index.html +++ b/build/1.0.0/category/node-operator-guide/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/tutorial/index.html b/build/1.0.0/category/tutorial/index.html index 874d7cc..c2e1d93 100644 --- a/build/1.0.0/category/tutorial/index.html +++ b/build/1.0.0/category/tutorial/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/category/user-guide/index.html b/build/1.0.0/category/user-guide/index.html index e3b0f9b..c35abbf 100644 --- a/build/1.0.0/category/user-guide/index.html +++ b/build/1.0.0/category/user-guide/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/finetune/intro/index.html b/build/1.0.0/creator-guide/finetune/intro/index.html index b399016..a476cfc 100644 --- a/build/1.0.0/creator-guide/finetune/intro/index.html +++ b/build/1.0.0/creator-guide/finetune/intro/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/finetune/llamacpp/index.html b/build/1.0.0/creator-guide/finetune/llamacpp/index.html index 8f89ffa..19165e5 100644 --- a/build/1.0.0/creator-guide/finetune/llamacpp/index.html +++ b/build/1.0.0/creator-guide/finetune/llamacpp/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/concepts/index.html b/build/1.0.0/creator-guide/knowledge/concepts/index.html index 3258e96..ec770d1 100644 --- a/build/1.0.0/creator-guide/knowledge/concepts/index.html +++ b/build/1.0.0/creator-guide/knowledge/concepts/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/csv/index.html b/build/1.0.0/creator-guide/knowledge/csv/index.html index 2bb464e..2059ebb 100644 --- a/build/1.0.0/creator-guide/knowledge/csv/index.html +++ b/build/1.0.0/creator-guide/knowledge/csv/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/firecrawl/index.html b/build/1.0.0/creator-guide/knowledge/firecrawl/index.html index 906ea1a..4a37270 100644 --- a/build/1.0.0/creator-guide/knowledge/firecrawl/index.html +++ b/build/1.0.0/creator-guide/knowledge/firecrawl/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/markdown/index.html b/build/1.0.0/creator-guide/knowledge/markdown/index.html index 9b4d0f1..6c02275 100644 --- a/build/1.0.0/creator-guide/knowledge/markdown/index.html +++ b/build/1.0.0/creator-guide/knowledge/markdown/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/pdf/index.html b/build/1.0.0/creator-guide/knowledge/pdf/index.html index 6805699..6efd6aa 100644 --- a/build/1.0.0/creator-guide/knowledge/pdf/index.html +++ b/build/1.0.0/creator-guide/knowledge/pdf/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/text/index.html b/build/1.0.0/creator-guide/knowledge/text/index.html index ff78e08..30aa276 100644 --- a/build/1.0.0/creator-guide/knowledge/text/index.html +++ b/build/1.0.0/creator-guide/knowledge/text/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/creator-guide/knowledge/web-tool/index.html b/build/1.0.0/creator-guide/knowledge/web-tool/index.html index 0a91e80..e0ffa92 100644 --- a/build/1.0.0/creator-guide/knowledge/web-tool/index.html +++ b/build/1.0.0/creator-guide/knowledge/web-tool/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/domain-guide/quick-start/index.html b/build/1.0.0/domain-guide/quick-start/index.html index 3fb41be..c26d8e8 
100644 --- a/build/1.0.0/domain-guide/quick-start/index.html +++ b/build/1.0.0/domain-guide/quick-start/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/intro/index.html b/build/1.0.0/intro/index.html index 642ee42..b56d2f2 100644 --- a/build/1.0.0/intro/index.html +++ b/build/1.0.0/intro/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/litepaper/index.html b/build/1.0.0/litepaper/index.html index a9cc10c..f4f4ec5 100644 --- a/build/1.0.0/litepaper/index.html +++ b/build/1.0.0/litepaper/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/cli-options/index.html b/build/1.0.0/node-guide/cli-options/index.html index e07b584..262f982 100644 --- a/build/1.0.0/node-guide/cli-options/index.html +++ b/build/1.0.0/node-guide/cli-options/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/customize/index.html b/build/1.0.0/node-guide/customize/index.html index 56b4002..3a9d41f 100644 --- a/build/1.0.0/node-guide/customize/index.html +++ b/build/1.0.0/node-guide/customize/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/install_uninstall/index.html b/build/1.0.0/node-guide/install_uninstall/index.html index 28589b5..2124e6a 100644 --- a/build/1.0.0/node-guide/install_uninstall/index.html +++ b/build/1.0.0/node-guide/install_uninstall/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/quick-start/index.html b/build/1.0.0/node-guide/quick-start/index.html index ae0bd4d..38f1d1a 100644 --- a/build/1.0.0/node-guide/quick-start/index.html +++ b/build/1.0.0/node-guide/quick-start/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/register/index.html b/build/1.0.0/node-guide/register/index.html index 23c1d2d..75c0c2d 100644 --- a/build/1.0.0/node-guide/register/index.html +++ b/build/1.0.0/node-guide/register/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/system-requirements/index.html b/build/1.0.0/node-guide/system-requirements/index.html index c151551..aa7ce01 100644 --- a/build/1.0.0/node-guide/system-requirements/index.html +++ b/build/1.0.0/node-guide/system-requirements/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/tasks/aws/index.html b/build/1.0.0/node-guide/tasks/aws/index.html index 4c1590f..4d7bd4e 100644 --- a/build/1.0.0/node-guide/tasks/aws/index.html +++ b/build/1.0.0/node-guide/tasks/aws/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/tasks/cuda/index.html b/build/1.0.0/node-guide/tasks/cuda/index.html index ccffe00..1f7bcd6 100644 --- a/build/1.0.0/node-guide/tasks/cuda/index.html +++ b/build/1.0.0/node-guide/tasks/cuda/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/tasks/docker/index.html b/build/1.0.0/node-guide/tasks/docker/index.html index dcddc77..97f7cfe 100644 --- a/build/1.0.0/node-guide/tasks/docker/index.html +++ b/build/1.0.0/node-guide/tasks/docker/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/tasks/local/index.html b/build/1.0.0/node-guide/tasks/local/index.html index b9e0a7d..21b1f0e 100644 --- a/build/1.0.0/node-guide/tasks/local/index.html +++ b/build/1.0.0/node-guide/tasks/local/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/tasks/multiple/index.html b/build/1.0.0/node-guide/tasks/multiple/index.html index fe43517..482b670 100644 --- a/build/1.0.0/node-guide/tasks/multiple/index.html +++ b/build/1.0.0/node-guide/tasks/multiple/index.html @@ -19,8 +19,8 @@ - - + + diff --git 
a/build/1.0.0/node-guide/tasks/protect/index.html b/build/1.0.0/node-guide/tasks/protect/index.html index b7ba9a6..098aa85 100644 --- a/build/1.0.0/node-guide/tasks/protect/index.html +++ b/build/1.0.0/node-guide/tasks/protect/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/node-guide/troubleshooting/index.html b/build/1.0.0/node-guide/troubleshooting/index.html index 003ddd7..a5c7b48 100644 --- a/build/1.0.0/node-guide/troubleshooting/index.html +++ b/build/1.0.0/node-guide/troubleshooting/index.html @@ -19,8 +19,8 @@ - - + + diff --git a/build/1.0.0/search-index.json b/build/1.0.0/search-index.json index 30e1e87..c94767a 100644 --- a/build/1.0.0/search-index.json +++ b/build/1.0.0/search-index.json @@ -1 +1 @@ -[{"documents":[{"i":555,"t":"Gaia nodes with long-term knowledge","u":"/1.0.0/creator-guide/knowledge/concepts","b":["Creator Guide","Knowledge bases"]},{"i":567,"t":"Knowledge base from source / summary pairs","u":"/1.0.0/creator-guide/knowledge/csv","b":["Creator Guide","Knowledge bases"]},{"i":581,"t":"llama.cpp","u":"/1.0.0/creator-guide/finetune/llamacpp","b":["Creator Guide","GaiaNet Node with finetuned LLMs"]},{"i":593,"t":"Fine-tune LLMs","u":"/1.0.0/creator-guide/finetune/intro","b":["Creator Guide","GaiaNet Node with finetuned LLMs"]},{"i":595,"t":"Knowledge base from a URL","u":"/1.0.0/creator-guide/knowledge/firecrawl","b":["Creator Guide","Knowledge bases"]},{"i":601,"t":"Knowledge base from a markdown file","u":"/1.0.0/creator-guide/knowledge/markdown","b":["Creator Guide","Knowledge bases"]},{"i":615,"t":"Knowledge base from a plain text file","u":"/1.0.0/creator-guide/knowledge/text","b":["Creator Guide","Knowledge bases"]},{"i":629,"t":"Knowledge base from a PDF file","u":"/1.0.0/creator-guide/knowledge/pdf","b":["Creator Guide","Knowledge bases"]},{"i":638,"t":"Quick Start with Launching Gaia Domain","u":"/1.0.0/domain-guide/quick-start","b":["Domain Operator Guide"]},{"i":642,"t":"GaiaNet: GenAI Agent Network","u":"/1.0.0/litepaper","b":[]},{"i":659,"t":"Customize Your GaiaNet Node","u":"/1.0.0/node-guide/customize","b":["Node Operator Guide"]},{"i":673,"t":"GaiaNet CLI options","u":"/1.0.0/node-guide/cli-options","b":["Node Operator Guide"]},{"i":689,"t":"Quick start with GaiaNet Node","u":"/1.0.0/node-guide/quick-start","b":["Node Operator Guide"]},{"i":697,"t":"Install and uninstall","u":"/1.0.0/node-guide/install_uninstall","b":["Node Operator Guide"]},{"i":713,"t":"Joining the Gaia Protocol","u":"/1.0.0/node-guide/register","b":["Node Operator Guide"]},{"i":727,"t":"System requirements","u":"/1.0.0/node-guide/system-requirements","b":["Node Operator Guide"]},{"i":737,"t":"Start a node on AWS using AMI images","u":"/1.0.0/node-guide/tasks/aws","b":["Node Operator Guide","How do I ..."]},{"i":743,"t":"Start a node with Docker","u":"/1.0.0/node-guide/tasks/docker","b":["Node Operator Guide","How do I ..."]},{"i":755,"t":"Protect the server process","u":"/1.0.0/node-guide/tasks/protect","b":["Node Operator Guide","How do I ..."]},{"i":761,"t":"Install multiple nodes on a single machine","u":"/1.0.0/node-guide/tasks/multiple","b":["Node Operator Guide","How do I ..."]},{"i":763,"t":"Run a local-only node","u":"/1.0.0/node-guide/tasks/local","b":["Node Operator Guide","How do I ..."]},{"i":765,"t":"Install CUDA on Linux","u":"/1.0.0/node-guide/tasks/cuda","b":["Node Operator Guide","How do I ..."]},{"i":774,"t":"Troubleshooting","u":"/1.0.0/node-guide/troubleshooting","b":["Node Operator Guide"]},{"i":793,"t":"Working with Coinbase 
AgentKit","u":"/1.0.0/tutorial/coinbase","b":["Tutorial"]},{"i":799,"t":"Working with eliza","u":"/1.0.0/tutorial/eliza","b":["Tutorial"]},{"i":805,"t":"Agent Zero","u":"/1.0.0/user-guide/apps/agent-zero","b":["User Guide","Agent frameworks and apps"]},{"i":823,"t":"Agentic translation on GaiaNet","u":"/1.0.0/tutorial/translator-agent","b":["Tutorial"]},{"i":849,"t":"Anything LLM","u":"/1.0.0/user-guide/apps/anything_llm","b":["User Guide","Agent frameworks and apps"]},{"i":853,"t":"Calling external tools","u":"/1.0.0/tutorial/tool-call","b":["Tutorial"]},{"i":863,"t":"CodeGPT","u":"/1.0.0/user-guide/apps/codegpt","b":["User Guide","Agent frameworks and apps"]},{"i":873,"t":"AI coding assistant: Continue","u":"/1.0.0/user-guide/apps/continue","b":["User Guide","Agent frameworks and apps"]},{"i":883,"t":"Dify + GaiaNet","u":"/1.0.0/user-guide/apps/dify","b":["User Guide","Agent frameworks and apps"]},{"i":887,"t":"Cursor AI IDE","u":"/1.0.0/user-guide/apps/cursor","b":["User Guide","Agent frameworks and apps"]},{"i":895,"t":"API Reference","u":"/1.0.0/user-guide/api-reference","b":["User Guide"]},{"i":911,"t":"FlowiseAI tool call","u":"/1.0.0/user-guide/apps/flowiseai-tool-call","b":["User Guide","Agent frameworks and apps"]},{"i":921,"t":"LlamaCoder","u":"/1.0.0/user-guide/apps/llamacoder","b":["User Guide","Agent frameworks and apps"]},{"i":927,"t":"OpenAI ecosystem apps","u":"/1.0.0/user-guide/apps/intro","b":["User Guide","Agent frameworks and apps"]},{"i":933,"t":"LlamaEdgeBook","u":"/1.0.0/user-guide/apps/llamaedgebook","b":["User Guide","Agent frameworks and apps"]},{"i":937,"t":"LlamaTutor","u":"/1.0.0/user-guide/apps/llamatutor","b":["User Guide","Agent frameworks and apps"]},{"i":943,"t":"LobeChat","u":"/1.0.0/user-guide/apps/lobechat","b":["User Guide","Agent frameworks and apps"]},{"i":947,"t":"Open WebUI","u":"/1.0.0/user-guide/apps/openwebui","b":["User Guide","Agent frameworks and apps"]},{"i":957,"t":"Obsidian","u":"/1.0.0/user-guide/apps/obsidian","b":["User Guide","Agent frameworks and apps"]},{"i":982,"t":"Stockbot","u":"/1.0.0/user-guide/apps/stockbot","b":["User Guide","Agent frameworks and apps"]},{"i":988,"t":"Translation Agent + GaiaNet","u":"/1.0.0/user-guide/apps/translation-agent","b":["User Guide","Agent frameworks and apps"]},{"i":996,"t":"Use my GaiaNet node","u":"/1.0.0/user-guide/mynode","b":["User Guide"]},{"i":1002,"t":"Zed","u":"/1.0.0/user-guide/apps/zed","b":["User Guide","Agent frameworks and apps"]},{"i":1010,"t":"Public GaiaNet nodes","u":"/1.0.0/user-guide/nodes","b":["User Guide"]},{"i":1033,"t":"LlamaCloud","u":"/1.0.0/user-guide/apps/llamaparse","b":["User Guide","Agent frameworks and apps"]},{"i":1039,"t":"Build a knowledge base using Gaia web tool","u":"/1.0.0/creator-guide/knowledge/web-tool","b":["Creator Guide","Knowledge bases"]},{"i":1047,"t":"A planning agent","u":"/1.0.0/user-guide/apps/gpt-planner","b":["User Guide","Agent frameworks and apps"]},{"i":1053,"t":"FlowiseAI RAG chat","u":"/1.0.0/user-guide/apps/flowiseai","b":["User Guide","Agent frameworks and 
apps"]},{"i":1073,"t":"Overview","u":"/1.0.0/intro","b":[]}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/555",[0,1.869,1,1.303,2,2.702,3,2.702,4,1.482]],["t/567",[4,1.482,5,1.59,6,2.702,7,2.702,8,2.702]],["t/581",[9,4.04,10,4.04]],["t/593",[11,3.468,12,3.468,13,2.971]],["t/595",[4,1.902,5,2.041,14,3.468]],["t/601",[4,1.666,5,1.788,15,3.037,16,2.315]],["t/615",[4,1.482,5,1.59,16,2.06,17,2.702,18,2.702]],["t/629",[4,1.666,5,1.788,16,2.315,19,3.037]],["t/638",[0,1.869,20,2.315,21,1.869,22,2.702,23,2.702]],["t/642",[24,1.465,25,3.037,26,1.93,27,3.037]],["t/659",[1,1.672,24,1.672,28,3.468]],["t/673",[24,1.672,29,3.468,30,3.468]],["t/689",[1,1.465,20,2.602,21,2.101,24,1.465]],["t/697",[31,3.08,32,4.04]],["t/713",[0,2.399,33,3.468,34,3.468]],["t/727",[35,4.04,36,4.04]],["t/737",[1,1.173,21,1.683,37,2.433,38,1.855,39,2.433,40,2.433]],["t/743",[1,1.672,21,2.399,41,3.468]],["t/755",[42,3.468,43,3.468,44,3.468]],["t/761",[1,1.303,31,2.06,45,2.702,46,2.702,47,2.702]],["t/763",[1,1.672,48,3.468,49,3.468]],["t/765",[31,2.643,50,3.468,51,3.468]],["t/774",[52,4.839]],["t/793",[53,2.971,54,3.468,55,3.468]],["t/799",[53,3.461,56,4.04]],["t/805",[26,2.568,57,4.04]],["t/823",[24,1.672,26,2.204,58,2.971]],["t/849",[13,3.461,59,4.04]],["t/853",[60,2.971,61,3.468,62,2.643]],["t/863",[63,4.839]],["t/873",[64,2.602,65,3.037,66,3.037,67,3.037]],["t/883",[24,1.948,68,4.04]],["t/887",[64,2.971,69,3.468,70,3.468]],["t/895",[71,4.04,72,4.04]],["t/911",[60,2.971,62,2.643,73,2.971]],["t/921",[74,4.839]],["t/927",[75,3.468,76,3.468,77,3.468]],["t/933",[78,4.839]],["t/937",[79,4.839]],["t/943",[80,4.839]],["t/947",[81,4.04,82,4.04]],["t/957",[83,4.839]],["t/982",[84,4.839]],["t/988",[24,1.672,26,2.204,58,2.971]],["t/996",[1,1.672,24,1.672,38,2.643]],["t/1002",[85,4.839]],["t/1010",[1,1.672,24,1.672,86,3.468]],["t/1033",[87,4.839]],["t/1039",[0,1.531,4,1.214,5,1.303,38,1.687,62,1.687,88,2.213,89,2.213]],["t/1047",[26,2.568,90,4.04]],["t/1053",[73,2.971,91,3.468,92,3.468]],["t/1073",[93,4.839]]],"invertedIndex":[["agent",{"_index":26,"t":{"642":{"position":[[15,5]]},"805":{"position":[[0,5]]},"823":{"position":[[0,7]]},"988":{"position":[[12,5]]},"1047":{"position":[[11,5]]}}}],["agentkit",{"_index":55,"t":{"793":{"position":[[22,8]]}}}],["ai",{"_index":64,"t":{"873":{"position":[[0,2]]},"887":{"position":[[7,2]]}}}],["ami",{"_index":39,"t":{"737":{"position":[[26,3]]}}}],["anyth",{"_index":59,"t":{"849":{"position":[[0,8]]}}}],["api",{"_index":71,"t":{"895":{"position":[[0,3]]}}}],["app",{"_index":77,"t":{"927":{"position":[[17,4]]}}}],["assist",{"_index":66,"t":{"873":{"position":[[10,9]]}}}],["aw",{"_index":37,"t":{"737":{"position":[[16,3]]}}}],["base",{"_index":5,"t":{"567":{"position":[[10,4]]},"595":{"position":[[10,4]]},"601":{"position":[[10,4]]},"615":{"position":[[10,4]]},"629":{"position":[[10,4]]},"1039":{"position":[[18,4]]}}}],["build",{"_index":88,"t":{"1039":{"position":[[0,5]]}}}],["call",{"_index":60,"t":{"853":{"position":[[0,7]]},"911":{"position":[[15,4]]}}}],["chat",{"_index":92,"t":{"1053":{"position":[[14,4]]}}}],["cli",{"_index":29,"t":{"673":{"position":[[8,3]]}}}],["code",{"_index":65,"t":{"873":{"position":[[3,6]]}}}],["codegpt",{"_index":63,"t":{"863":{"position":[[0,7]]}}}],["coinbas",{"_index":54,"t":{"793":{"position":[[13,8]]}}}],["continu",{"_index":67,"t":{"873":{"position":[[21,8]]}}}],["cpp",{"_index":10,"t":{"581":{"position":[[6,3]]}}}],["cuda",{"_index":50,"t":{"765":{"position":[[8,4]]}}}],["cursor",{"_index":69,"t":{"887":{"position":[[0,6]]}}}],["custom",{"_index":
28,"t":{"659":{"position":[[0,9]]}}}],["difi",{"_index":68,"t":{"883":{"position":[[0,4]]}}}],["docker",{"_index":41,"t":{"743":{"position":[[18,6]]}}}],["domain",{"_index":23,"t":{"638":{"position":[[32,6]]}}}],["ecosystem",{"_index":76,"t":{"927":{"position":[[7,9]]}}}],["eliza",{"_index":56,"t":{"799":{"position":[[13,5]]}}}],["extern",{"_index":61,"t":{"853":{"position":[[8,8]]}}}],["file",{"_index":16,"t":{"601":{"position":[[31,4]]},"615":{"position":[[33,4]]},"629":{"position":[[26,4]]}}}],["fine",{"_index":11,"t":{"593":{"position":[[0,4]]}}}],["flowiseai",{"_index":73,"t":{"911":{"position":[[0,9]]},"1053":{"position":[[0,9]]}}}],["gaia",{"_index":0,"t":{"555":{"position":[[0,4]]},"638":{"position":[[27,4]]},"713":{"position":[[12,4]]},"1039":{"position":[[29,4]]}}}],["gaianet",{"_index":24,"t":{"642":{"position":[[0,7]]},"659":{"position":[[15,7]]},"673":{"position":[[0,7]]},"689":{"position":[[17,7]]},"823":{"position":[[23,7]]},"883":{"position":[[7,7]]},"988":{"position":[[21,7]]},"996":{"position":[[7,7]]},"1010":{"position":[[7,7]]}}}],["genai",{"_index":25,"t":{"642":{"position":[[9,5]]}}}],["id",{"_index":70,"t":{"887":{"position":[[10,3]]}}}],["imag",{"_index":40,"t":{"737":{"position":[[30,6]]}}}],["instal",{"_index":31,"t":{"697":{"position":[[0,7]]},"761":{"position":[[0,7]]},"765":{"position":[[0,7]]}}}],["join",{"_index":33,"t":{"713":{"position":[[0,7]]}}}],["knowledg",{"_index":4,"t":{"555":{"position":[[26,9]]},"567":{"position":[[0,9]]},"595":{"position":[[0,9]]},"601":{"position":[[0,9]]},"615":{"position":[[0,9]]},"629":{"position":[[0,9]]},"1039":{"position":[[8,9]]}}}],["launch",{"_index":22,"t":{"638":{"position":[[17,9]]}}}],["linux",{"_index":51,"t":{"765":{"position":[[16,5]]}}}],["llama",{"_index":9,"t":{"581":{"position":[[0,5]]}}}],["llamacloud",{"_index":87,"t":{"1033":{"position":[[0,10]]}}}],["llamacod",{"_index":74,"t":{"921":{"position":[[0,10]]}}}],["llamaedgebook",{"_index":78,"t":{"933":{"position":[[0,13]]}}}],["llamatutor",{"_index":79,"t":{"937":{"position":[[0,10]]}}}],["llm",{"_index":13,"t":{"593":{"position":[[10,4]]},"849":{"position":[[9,3]]}}}],["lobechat",{"_index":80,"t":{"943":{"position":[[0,8]]}}}],["local",{"_index":49,"t":{"763":{"position":[[6,5]]}}}],["long",{"_index":2,"t":{"555":{"position":[[16,4]]}}}],["machin",{"_index":47,"t":{"761":{"position":[[35,7]]}}}],["markdown",{"_index":15,"t":{"601":{"position":[[22,8]]}}}],["multipl",{"_index":45,"t":{"761":{"position":[[8,8]]}}}],["network",{"_index":27,"t":{"642":{"position":[[21,7]]}}}],["node",{"_index":1,"t":{"555":{"position":[[5,5]]},"659":{"position":[[23,4]]},"689":{"position":[[25,4]]},"737":{"position":[[8,4]]},"743":{"position":[[8,4]]},"761":{"position":[[17,5]]},"763":{"position":[[17,4]]},"996":{"position":[[15,4]]},"1010":{"position":[[15,5]]}}}],["obsidian",{"_index":83,"t":{"957":{"position":[[0,8]]}}}],["open",{"_index":81,"t":{"947":{"position":[[0,4]]}}}],["openai",{"_index":75,"t":{"927":{"position":[[0,6]]}}}],["option",{"_index":30,"t":{"673":{"position":[[12,7]]}}}],["overview",{"_index":93,"t":{"1073":{"position":[[0,8]]}}}],["pair",{"_index":8,"t":{"567":{"position":[[37,5]]}}}],["pdf",{"_index":19,"t":{"629":{"position":[[22,3]]}}}],["plain",{"_index":17,"t":{"615":{"position":[[22,5]]}}}],["plan",{"_index":90,"t":{"1047":{"position":[[2,8]]}}}],["process",{"_index":44,"t":{"755":{"position":[[19,7]]}}}],["protect",{"_index":42,"t":{"755":{"position":[[0,7]]}}}],["protocol",{"_index":34,"t":{"713":{"position":[[17,8]]}}}],["public",{"_index":86,"t":
{"1010":{"position":[[0,6]]}}}],["quick",{"_index":20,"t":{"638":{"position":[[0,5]]},"689":{"position":[[0,5]]}}}],["rag",{"_index":91,"t":{"1053":{"position":[[10,3]]}}}],["refer",{"_index":72,"t":{"895":{"position":[[4,9]]}}}],["requir",{"_index":36,"t":{"727":{"position":[[7,12]]}}}],["run",{"_index":48,"t":{"763":{"position":[[0,3]]}}}],["server",{"_index":43,"t":{"755":{"position":[[12,6]]}}}],["singl",{"_index":46,"t":{"761":{"position":[[28,6]]}}}],["sourc",{"_index":6,"t":{"567":{"position":[[20,6]]}}}],["start",{"_index":21,"t":{"638":{"position":[[6,5]]},"689":{"position":[[6,5]]},"737":{"position":[[0,5]]},"743":{"position":[[0,5]]}}}],["stockbot",{"_index":84,"t":{"982":{"position":[[0,8]]}}}],["summari",{"_index":7,"t":{"567":{"position":[[29,7]]}}}],["system",{"_index":35,"t":{"727":{"position":[[0,6]]}}}],["term",{"_index":3,"t":{"555":{"position":[[21,4]]}}}],["text",{"_index":18,"t":{"615":{"position":[[28,4]]}}}],["tool",{"_index":62,"t":{"853":{"position":[[17,5]]},"911":{"position":[[10,4]]},"1039":{"position":[[38,4]]}}}],["translat",{"_index":58,"t":{"823":{"position":[[8,11]]},"988":{"position":[[0,11]]}}}],["troubleshoot",{"_index":52,"t":{"774":{"position":[[0,15]]}}}],["tune",{"_index":12,"t":{"593":{"position":[[5,4]]}}}],["uninstal",{"_index":32,"t":{"697":{"position":[[12,9]]}}}],["url",{"_index":14,"t":{"595":{"position":[[22,3]]}}}],["us",{"_index":38,"t":{"737":{"position":[[20,5]]},"996":{"position":[[0,3]]},"1039":{"position":[[23,5]]}}}],["web",{"_index":89,"t":{"1039":{"position":[[34,3]]}}}],["webui",{"_index":82,"t":{"947":{"position":[[5,5]]}}}],["work",{"_index":53,"t":{"793":{"position":[[0,7]]},"799":{"position":[[0,7]]}}}],["zed",{"_index":85,"t":{"1002":{"position":[[0,3]]}}}],["zero",{"_index":57,"t":{"805":{"position":[[6,4]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":557,"t":"Workflow for creating knowledge embeddings","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#workflow-for-creating-knowledge-embeddings","p":555},{"i":559,"t":"Lifecycle of a user query on a knowledge-supplemented LLM","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#lifecycle-of-a-user-query-on-a-knowledge-supplemented-llm","p":555},{"i":561,"t":"Ask a question","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#ask-a-question","p":555},{"i":563,"t":"Retrieve similar embeddings","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#retrieve-similar-embeddings","p":555},{"i":565,"t":"Response to the user query","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#response-to-the-user-query","p":555},{"i":569,"t":"Prerequisites","u":"/1.0.0/creator-guide/knowledge/csv","h":"#prerequisites","p":567},{"i":571,"t":"Start a vector database","u":"/1.0.0/creator-guide/knowledge/csv","h":"#start-a-vector-database","p":567},{"i":573,"t":"Create the vector collection snapshot","u":"/1.0.0/creator-guide/knowledge/csv","h":"#create-the-vector-collection-snapshot","p":567},{"i":575,"t":"Options","u":"/1.0.0/creator-guide/knowledge/csv","h":"#options","p":567},{"i":577,"t":"Create a vector snapshot","u":"/1.0.0/creator-guide/knowledge/csv","h":"#create-a-vector-snapshot","p":567},{"i":579,"t":"Next steps","u":"/1.0.0/creator-guide/knowledge/csv","h":"#next-steps","p":567},{"i":583,"t":"Build the fine-tune utility from llama.cpp","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#build-the-fine-tune-utility-from-llamacpp","p":581},{"i":585,"t":"Get the base model","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#get-the-base-model","p":581},{"i":587,"t":"Create a question 
and answer set for fine-tuning","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#create-a-question-and-answer-set-for-fine-tuning","p":581},{"i":589,"t":"Finetune!","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#finetune","p":581},{"i":591,"t":"Merge","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#merge","p":581},{"i":597,"t":"Parse the URL content to a markdown file","u":"/1.0.0/creator-guide/knowledge/firecrawl","h":"#parse-the-url-content-to-a-markdown-file","p":595},{"i":599,"t":"Create embeddings from the markdown files","u":"/1.0.0/creator-guide/knowledge/firecrawl","h":"#create-embeddings-from-the-markdown-files","p":595},{"i":603,"t":"Prerequisites","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#prerequisites","p":601},{"i":605,"t":"Start a vector database","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#start-a-vector-database","p":601},{"i":607,"t":"Create the vector collection snapshot","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#create-the-vector-collection-snapshot","p":601},{"i":609,"t":"Options","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#options","p":601},{"i":611,"t":"Create a vector snapshot","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#create-a-vector-snapshot","p":601},{"i":613,"t":"Next steps","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#next-steps","p":601},{"i":617,"t":"Prerequisites","u":"/1.0.0/creator-guide/knowledge/text","h":"#prerequisites","p":615},{"i":619,"t":"Start a vector database","u":"/1.0.0/creator-guide/knowledge/text","h":"#start-a-vector-database","p":615},{"i":621,"t":"Create the vector collection snapshot","u":"/1.0.0/creator-guide/knowledge/text","h":"#create-the-vector-collection-snapshot","p":615},{"i":623,"t":"Options","u":"/1.0.0/creator-guide/knowledge/text","h":"#options","p":615},{"i":625,"t":"Create a vector snapshot","u":"/1.0.0/creator-guide/knowledge/text","h":"#create-a-vector-snapshot","p":615},{"i":627,"t":"Next steps","u":"/1.0.0/creator-guide/knowledge/text","h":"#next-steps","p":615},{"i":631,"t":"Tools to convert a PDF file to a markdown file","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#tools-to-convert-a-pdf-file-to-a-markdown-file","p":629},{"i":632,"t":"Tool #1: LlamaParse","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#tool-1-llamaparse","p":629},{"i":634,"t":"Tool #2: GPTPDF","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#tool-2-gptpdf","p":629},{"i":636,"t":"Create embeddings from the markdown files","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#create-embeddings-from-the-markdown-files","p":629},{"i":640,"t":"Steps to Launch Your Gaia Domain","u":"/1.0.0/domain-guide/quick-start","h":"#steps-to-launch-your-gaia-domain","p":638},{"i":643,"t":"Abstract","u":"/1.0.0/litepaper","h":"#abstract","p":642},{"i":645,"t":"Introduction","u":"/1.0.0/litepaper","h":"#introduction","p":642},{"i":647,"t":"Open-source and decentralization","u":"/1.0.0/litepaper","h":"#open-source-and-decentralization","p":642},{"i":649,"t":"GaiaNet node","u":"/1.0.0/litepaper","h":"#gaianet-node","p":642},{"i":651,"t":"GaiaNet network","u":"/1.0.0/litepaper","h":"#gaianet-network","p":642},{"i":653,"t":"GaiaNet token","u":"/1.0.0/litepaper","h":"#gaianet-token","p":642},{"i":655,"t":"Component marketplace for AI assets","u":"/1.0.0/litepaper","h":"#component-marketplace-for-ai-assets","p":642},{"i":657,"t":"Conclusion","u":"/1.0.0/litepaper","h":"#conclusion","p":642},{"i":661,"t":"Pre-set configurations","u":"/1.0.0/node-guide/customize","h":"#pre-set-configurations","p":659},{"i":663,"t":"The config 
subcommand","u":"/1.0.0/node-guide/customize","h":"#the-config-subcommand","p":659},{"i":665,"t":"Select an LLM","u":"/1.0.0/node-guide/customize","h":"#select-an-llm","p":659},{"i":667,"t":"Select a knowledge base","u":"/1.0.0/node-guide/customize","h":"#select-a-knowledge-base","p":659},{"i":669,"t":"Customize prompts","u":"/1.0.0/node-guide/customize","h":"#customize-prompts","p":659},{"i":671,"t":"Next steps","u":"/1.0.0/node-guide/customize","h":"#next-steps","p":659},{"i":675,"t":"help","u":"/1.0.0/node-guide/cli-options","h":"#help","p":673},{"i":677,"t":"version","u":"/1.0.0/node-guide/cli-options","h":"#version","p":673},{"i":679,"t":"init","u":"/1.0.0/node-guide/cli-options","h":"#init","p":673},{"i":681,"t":"start","u":"/1.0.0/node-guide/cli-options","h":"#start","p":673},{"i":683,"t":"stop","u":"/1.0.0/node-guide/cli-options","h":"#stop","p":673},{"i":685,"t":"config","u":"/1.0.0/node-guide/cli-options","h":"#config","p":673},{"i":687,"t":"base","u":"/1.0.0/node-guide/cli-options","h":"#base","p":673},{"i":691,"t":"Prerequisites","u":"/1.0.0/node-guide/quick-start","h":"#prerequisites","p":689},{"i":693,"t":"Installing the node","u":"/1.0.0/node-guide/quick-start","h":"#installing-the-node","p":689},{"i":695,"t":"Next steps","u":"/1.0.0/node-guide/quick-start","h":"#next-steps","p":689},{"i":699,"t":"Install","u":"/1.0.0/node-guide/install_uninstall","h":"#install","p":697},{"i":701,"t":"Install the latest version of GaiaNet node","u":"/1.0.0/node-guide/install_uninstall","h":"#install-the-latest-version-of-gaianet-node","p":697},{"i":703,"t":"Install the specific version of GaiaNet Node","u":"/1.0.0/node-guide/install_uninstall","h":"#install-the-specific-version-of-gaianet-node","p":697},{"i":705,"t":"Update the current Gaianet node","u":"/1.0.0/node-guide/install_uninstall","h":"#update-the-current-gaianet-node","p":697},{"i":707,"t":"Uninstall","u":"/1.0.0/node-guide/install_uninstall","h":"#uninstall","p":697},{"i":709,"t":"What's installed","u":"/1.0.0/node-guide/install_uninstall","h":"#whats-installed","p":697},{"i":711,"t":"CLI options for the installer","u":"/1.0.0/node-guide/install_uninstall","h":"#cli-options-for-the-installer","p":697},{"i":715,"t":"Bind your node","u":"/1.0.0/node-guide/register","h":"#bind-your-node","p":713},{"i":717,"t":"Protect your node ID and device ID","u":"/1.0.0/node-guide/register","h":"#protect-your-node-id-and-device-id","p":713},{"i":719,"t":"Join a Domain","u":"/1.0.0/node-guide/register","h":"#join-a-domain","p":713},{"i":721,"t":"Steps to Join a Domain from Your Node Management Page","u":"/1.0.0/node-guide/register","h":"#steps-to-join-a-domain-from-your-node-management-page","p":713},{"i":723,"t":"Steps to Join a Domain from the AI Agent Domains page","u":"/1.0.0/node-guide/register","h":"#steps-to-join-a-domain-from-the-ai-agent-domains-page","p":713},{"i":725,"t":"Important Notes","u":"/1.0.0/node-guide/register","h":"#important-notes","p":713},{"i":729,"t":"Supported on","u":"/1.0.0/node-guide/system-requirements","h":"#supported-on","p":727},{"i":731,"t":"GPU","u":"/1.0.0/node-guide/system-requirements","h":"#gpu","p":727},{"i":733,"t":"CPU","u":"/1.0.0/node-guide/system-requirements","h":"#cpu","p":727},{"i":735,"t":"Oses","u":"/1.0.0/node-guide/system-requirements","h":"#oses","p":727},{"i":739,"t":"Running an Nvidia GPU-enabled AWS instance","u":"/1.0.0/node-guide/tasks/aws","h":"#running-an-nvidia-gpu-enabled-aws-instance","p":737},{"i":741,"t":"Running a CPU-only AWS 
instance","u":"/1.0.0/node-guide/tasks/aws","h":"#running-a-cpu-only-aws-instance","p":737},{"i":745,"t":"Quick start","u":"/1.0.0/node-guide/tasks/docker","h":"#quick-start","p":743},{"i":747,"t":"Stop and re-start","u":"/1.0.0/node-guide/tasks/docker","h":"#stop-and-re-start","p":743},{"i":749,"t":"Make changes to the node","u":"/1.0.0/node-guide/tasks/docker","h":"#make-changes-to-the-node","p":743},{"i":751,"t":"Change the node ID","u":"/1.0.0/node-guide/tasks/docker","h":"#change-the-node-id","p":743},{"i":753,"t":"Build a node image locally","u":"/1.0.0/node-guide/tasks/docker","h":"#build-a-node-image-locally","p":743},{"i":757,"t":"Use Supervise","u":"/1.0.0/node-guide/tasks/protect","h":"#use-supervise","p":755},{"i":759,"t":"Reduce the nice value","u":"/1.0.0/node-guide/tasks/protect","h":"#reduce-the-nice-value","p":755},{"i":767,"t":"Ubuntu 22.04","u":"/1.0.0/node-guide/tasks/cuda","h":"#ubuntu-2204","p":765},{"i":768,"t":"1 Install the Nvidia driver.","u":"/1.0.0/node-guide/tasks/cuda","h":"#1-install-the-nvidia-driver","p":765},{"i":770,"t":"2 Install the CUDA toolkit.","u":"/1.0.0/node-guide/tasks/cuda","h":"#2-install-the-cuda-toolkit","p":765},{"i":772,"t":"More resources","u":"/1.0.0/node-guide/tasks/cuda","h":"#more-resources","p":765},{"i":775,"t":"The system cannot find CUDA libraries","u":"/1.0.0/node-guide/troubleshooting","h":"#the-system-cannot-find-cuda-libraries","p":774},{"i":777,"t":"Failed to recover from collection snapshot on Windows WSL","u":"/1.0.0/node-guide/troubleshooting","h":"#failed-to-recover-from-collection-snapshot-on-windows-wsl","p":774},{"i":779,"t":"Failed to start the node with an error message Port 8080 is in use. Exit ...","u":"/1.0.0/node-guide/troubleshooting","h":"#failed-to-start-the-node-with-an-error-message-port-8080-is-in-use-exit-","p":774},{"i":781,"t":"Load library failed: libgomp.so.1: cannot open shared object file: No such file or directory","u":"/1.0.0/node-guide/troubleshooting","h":"#load-library-failed-libgompso1-cannot-open-shared-object-file-no-such-file-or-directory","p":774},{"i":783,"t":"Failed to remove the default collection","u":"/1.0.0/node-guide/troubleshooting","h":"#failed-to-remove-the-default-collection","p":774},{"i":785,"t":"File I/O error","u":"/1.0.0/node-guide/troubleshooting","h":"#file-io-error","p":774},{"i":787,"t":"The \"Failed to open the file\" Error","u":"/1.0.0/node-guide/troubleshooting","h":"#the-failed-to-open-the-file-error","p":774},{"i":789,"t":"The \"Too many open files\" Error on macOS","u":"/1.0.0/node-guide/troubleshooting","h":"#the-too-many-open-files-error-on-macos","p":774},{"i":791,"t":"Permission denied when use the installer script to install WasmEdge","u":"/1.0.0/node-guide/troubleshooting","h":"#permission-denied-when-use-the-installer-script-to-install-wasmedge","p":774},{"i":795,"t":"Quickstart","u":"/1.0.0/tutorial/coinbase","h":"#quickstart","p":793},{"i":797,"t":"A Telegram bot for AgentKit","u":"/1.0.0/tutorial/coinbase","h":"#a-telegram-bot-for-agentkit","p":793},{"i":801,"t":"Build a Trump agent with eliza and Gaia","u":"/1.0.0/tutorial/eliza","h":"#build-a-trump-agent-with-eliza-and-gaia","p":799},{"i":803,"t":"Advanced use case","u":"/1.0.0/tutorial/eliza","h":"#advanced-use-case","p":799},{"i":807,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/agent-zero","h":"#prerequisites","p":805},{"i":809,"t":"Configure the agent","u":"/1.0.0/user-guide/apps/agent-zero","h":"#configure-the-agent","p":805},{"i":811,"t":"Run the 
agent","u":"/1.0.0/user-guide/apps/agent-zero","h":"#run-the-agent","p":805},{"i":813,"t":"Example 1","u":"/1.0.0/user-guide/apps/agent-zero","h":"#example-1","p":805},{"i":815,"t":"Example 2","u":"/1.0.0/user-guide/apps/agent-zero","h":"#example-2","p":805},{"i":817,"t":"Example 3","u":"/1.0.0/user-guide/apps/agent-zero","h":"#example-3","p":805},{"i":819,"t":"Example 4","u":"/1.0.0/user-guide/apps/agent-zero","h":"#example-4","p":805},{"i":821,"t":"Example 5","u":"/1.0.0/user-guide/apps/agent-zero","h":"#example-5","p":805},{"i":825,"t":"Introduction to the LLM Translation Agent","u":"/1.0.0/tutorial/translator-agent","h":"#introduction-to-the-llm-translation-agent","p":823},{"i":827,"t":"Demo 1: Running Translation Agents with Llama-3-8B","u":"/1.0.0/tutorial/translator-agent","h":"#demo-1-running-translation-agents-with-llama-3-8b","p":823},{"i":829,"t":"Step 1.1: Run a Llama-3-8B GaiaNet node","u":"/1.0.0/tutorial/translator-agent","h":"#step-11-run-a-llama-3-8b-gaianet-node","p":823},{"i":831,"t":"Step 1.2 Run the Translation Agent on top of Llama-3-8B","u":"/1.0.0/tutorial/translator-agent","h":"#step-12-run-the-translation-agent-on-top-of-llama-3-8b","p":823},{"i":833,"t":"Demo 2: Running Translation Agents with gemma-2-27b","u":"/1.0.0/tutorial/translator-agent","h":"#demo-2-running-translation-agents-with-gemma-2-27b","p":823},{"i":835,"t":"Step 2.1 Run a gemma-2-27b GaiaNet node","u":"/1.0.0/tutorial/translator-agent","h":"#step-21-run-a-gemma-2-27b-gaianet-node","p":823},{"i":837,"t":"Step 2.2 Run the Translation Agent to run on top of gemma-2-27b","u":"/1.0.0/tutorial/translator-agent","h":"#step-22-run-the-translation-agent-to-run-on-top-of-gemma-2-27b","p":823},{"i":839,"t":"Demo 3: Running Translation Agents with Phi-3-Medium long context model","u":"/1.0.0/tutorial/translator-agent","h":"#demo-3-running-translation-agents-with-phi-3-medium-long-context-model","p":823},{"i":841,"t":"Step 3.1: Run a Phi-3-medium-128k GaiaNet node","u":"/1.0.0/tutorial/translator-agent","h":"#step-31-run-a-phi-3-medium-128k-gaianet-node","p":823},{"i":843,"t":"Step 3.2 Clone and run the Translation Agent on top of Phi-3-medium-128k","u":"/1.0.0/tutorial/translator-agent","h":"#step-32-clone-and-run-the-translation-agent-on-top-of-phi-3-medium-128k","p":823},{"i":845,"t":"Evaluation of Translation Quality","u":"/1.0.0/tutorial/translator-agent","h":"#evaluation-of-translation-quality","p":823},{"i":847,"t":"Conclusion","u":"/1.0.0/tutorial/translator-agent","h":"#conclusion","p":823},{"i":851,"t":"Steps","u":"/1.0.0/user-guide/apps/anything_llm","h":"#steps","p":849},{"i":855,"t":"Prerequisites","u":"/1.0.0/tutorial/tool-call","h":"#prerequisites","p":853},{"i":857,"t":"Run the demo agent","u":"/1.0.0/tutorial/tool-call","h":"#run-the-demo-agent","p":853},{"i":859,"t":"Use the agent","u":"/1.0.0/tutorial/tool-call","h":"#use-the-agent","p":853},{"i":861,"t":"Make it robust","u":"/1.0.0/tutorial/tool-call","h":"#make-it-robust","p":853},{"i":865,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/codegpt","h":"#prerequisites","p":863},{"i":867,"t":"Install CodeGPT","u":"/1.0.0/user-guide/apps/codegpt","h":"#install-codegpt","p":863},{"i":869,"t":"Configure CodeGPT","u":"/1.0.0/user-guide/apps/codegpt","h":"#configure-codegpt","p":863},{"i":871,"t":"Use the plugin","u":"/1.0.0/user-guide/apps/codegpt","h":"#use-the-plugin","p":863},{"i":875,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/continue","h":"#prerequisites","p":873},{"i":877,"t":"Install 
Continue","u":"/1.0.0/user-guide/apps/continue","h":"#install-continue","p":873},{"i":879,"t":"Configure Continue","u":"/1.0.0/user-guide/apps/continue","h":"#configure-continue","p":873},{"i":881,"t":"Use the plugin","u":"/1.0.0/user-guide/apps/continue","h":"#use-the-plugin","p":873},{"i":885,"t":"Steps","u":"/1.0.0/user-guide/apps/dify","h":"#steps","p":883},{"i":889,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/cursor","h":"#prerequisites","p":887},{"i":891,"t":"Configure Cursor","u":"/1.0.0/user-guide/apps/cursor","h":"#configure-cursor","p":887},{"i":893,"t":"Use Cursor","u":"/1.0.0/user-guide/apps/cursor","h":"#use-cursor","p":887},{"i":896,"t":"Introduction","u":"/1.0.0/user-guide/api-reference","h":"#introduction","p":895},{"i":898,"t":"Endpoints","u":"/1.0.0/user-guide/api-reference","h":"#endpoints","p":895},{"i":899,"t":"Chat","u":"/1.0.0/user-guide/api-reference","h":"#chat","p":895},{"i":901,"t":"Embedding","u":"/1.0.0/user-guide/api-reference","h":"#embedding","p":895},{"i":903,"t":"Retrieve","u":"/1.0.0/user-guide/api-reference","h":"#retrieve","p":895},{"i":905,"t":"Get the model","u":"/1.0.0/user-guide/api-reference","h":"#get-the-model","p":895},{"i":907,"t":"Get the node info","u":"/1.0.0/user-guide/api-reference","h":"#get-the-node-info","p":895},{"i":909,"t":"Status Codes","u":"/1.0.0/user-guide/api-reference","h":"#status-codes","p":895},{"i":913,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/flowiseai-tool-call","h":"#prerequisites","p":911},{"i":915,"t":"Start a FlowiseAI server","u":"/1.0.0/user-guide/apps/flowiseai-tool-call","h":"#start-a-flowiseai-server","p":911},{"i":917,"t":"Build a chatbot for realtime IP lookup","u":"/1.0.0/user-guide/apps/flowiseai-tool-call","h":"#build-a-chatbot-for-realtime-ip-lookup","p":911},{"i":919,"t":"Give it a try","u":"/1.0.0/user-guide/apps/flowiseai-tool-call","h":"#give-it-a-try","p":911},{"i":923,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/llamacoder","h":"#prerequisites","p":921},{"i":925,"t":"Run the agent","u":"/1.0.0/user-guide/apps/llamacoder","h":"#run-the-agent","p":921},{"i":929,"t":"The OpenAI Python library","u":"/1.0.0/user-guide/apps/intro","h":"#the-openai-python-library","p":927},{"i":931,"t":"The OpenAI Node API library","u":"/1.0.0/user-guide/apps/intro","h":"#the-openai-node-api-library","p":927},{"i":935,"t":"Steps","u":"/1.0.0/user-guide/apps/llamaedgebook","h":"#steps","p":933},{"i":939,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/llamatutor","h":"#prerequisites","p":937},{"i":941,"t":"Run the agent","u":"/1.0.0/user-guide/apps/llamatutor","h":"#run-the-agent","p":937},{"i":945,"t":"Steps","u":"/1.0.0/user-guide/apps/lobechat","h":"#steps","p":943},{"i":949,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/openwebui","h":"#prerequisites","p":947},{"i":951,"t":"Start the Open WebUI on your machine","u":"/1.0.0/user-guide/apps/openwebui","h":"#start-the-open-webui-on-your-machine","p":947},{"i":953,"t":"Use Open WebUI as a Chatbot UI","u":"/1.0.0/user-guide/apps/openwebui","h":"#use-open-webui-as-a-chatbot-ui","p":947},{"i":955,"t":"Use Open WebUI as a client-side RAG tool","u":"/1.0.0/user-guide/apps/openwebui","h":"#use-open-webui-as-a-client-side-rag-tool","p":947},{"i":959,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/obsidian","h":"#prerequisites","p":957},{"i":961,"t":"Obsidian-local-gpt Plugin Setup","u":"/1.0.0/user-guide/apps/obsidian","h":"#obsidian-local-gpt-plugin-setup","p":957},{"i":963,"t":"Install the Obsidian-local-gpt 
Plugin","u":"/1.0.0/user-guide/apps/obsidian","h":"#install-the-obsidian-local-gpt-plugin","p":957},{"i":965,"t":"Configure the Plugin","u":"/1.0.0/user-guide/apps/obsidian","h":"#configure-the-plugin","p":957},{"i":967,"t":"Configure Obsidian Hotkey","u":"/1.0.0/user-guide/apps/obsidian","h":"#configure-obsidian-hotkey","p":957},{"i":969,"t":"Use Cases","u":"/1.0.0/user-guide/apps/obsidian","h":"#use-cases","p":957},{"i":970,"t":"Text Continuation","u":"/1.0.0/user-guide/apps/obsidian","h":"#text-continuation","p":957},{"i":972,"t":"Summarization","u":"/1.0.0/user-guide/apps/obsidian","h":"#summarization","p":957},{"i":974,"t":"Spelling and Grammar Check","u":"/1.0.0/user-guide/apps/obsidian","h":"#spelling-and-grammar-check","p":957},{"i":976,"t":"Extract Action Items","u":"/1.0.0/user-guide/apps/obsidian","h":"#extract-action-items","p":957},{"i":978,"t":"General Assistance","u":"/1.0.0/user-guide/apps/obsidian","h":"#general-assistance","p":957},{"i":980,"t":"Try it now!","u":"/1.0.0/user-guide/apps/obsidian","h":"#try-it-now","p":957},{"i":984,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/stockbot","h":"#prerequisites","p":982},{"i":986,"t":"Run the agent","u":"/1.0.0/user-guide/apps/stockbot","h":"#run-the-agent","p":982},{"i":990,"t":"Prepare the environment","u":"/1.0.0/user-guide/apps/translation-agent","h":"#prepare-the-environment","p":988},{"i":992,"t":"Prepare your translation task","u":"/1.0.0/user-guide/apps/translation-agent","h":"#prepare-your-translation-task","p":988},{"i":994,"t":"Translate","u":"/1.0.0/user-guide/apps/translation-agent","h":"#translate","p":988},{"i":998,"t":"Web-based chatbot","u":"/1.0.0/user-guide/mynode","h":"#web-based-chatbot","p":996},{"i":1000,"t":"OpenAI API replacement","u":"/1.0.0/user-guide/mynode","h":"#openai-api-replacement","p":996},{"i":1004,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/zed","h":"#prerequisites","p":1002},{"i":1006,"t":"Configure Zed","u":"/1.0.0/user-guide/apps/zed","h":"#configure-zed","p":1002},{"i":1008,"t":"Use Zed","u":"/1.0.0/user-guide/apps/zed","h":"","p":1002},{"i":1012,"t":"Public Gaia domains","u":"/1.0.0/user-guide/nodes","h":"#public-gaia-domains","p":1010},{"i":1013,"t":"LLM: Llama 8b","u":"/1.0.0/user-guide/nodes","h":"#llm-llama-8b","p":1010},{"i":1015,"t":"Voice-to-text: Whisper v2 large","u":"/1.0.0/user-guide/nodes","h":"#voice-to-text-whisper-v2-large","p":1010},{"i":1017,"t":"Text-to-image: Realistic vision","u":"/1.0.0/user-guide/nodes","h":"#text-to-image-realistic-vision","p":1010},{"i":1019,"t":"Text-to-voice: GPT-SoVITS","u":"/1.0.0/user-guide/nodes","h":"#text-to-voice-gpt-sovits","p":1010},{"i":1021,"t":"Coding assistant agents","u":"/1.0.0/user-guide/nodes","h":"#coding-assistant-agents","p":1010},{"i":1022,"t":"Coder","u":"/1.0.0/user-guide/nodes","h":"#coder","p":1010},{"i":1024,"t":"Rust Coder","u":"/1.0.0/user-guide/nodes","h":"#rust-coder","p":1010},{"i":1026,"t":"Alternative LLM domains","u":"/1.0.0/user-guide/nodes","h":"#alternative-llm-domains","p":1010},{"i":1027,"t":"Llama 3b","u":"/1.0.0/user-guide/nodes","h":"#llama-3b","p":1010},{"i":1029,"t":"Qwen 7b","u":"/1.0.0/user-guide/nodes","h":"#qwen-7b","p":1010},{"i":1031,"t":"Qwen 72b","u":"/1.0.0/user-guide/nodes","h":"#qwen-72b","p":1010},{"i":1035,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/llamaparse","h":"#prerequisites","p":1033},{"i":1037,"t":"Steps","u":"/1.0.0/user-guide/apps/llamaparse","h":"#steps","p":1033},{"i":1041,"t":"Segment your text 
file","u":"/1.0.0/creator-guide/knowledge/web-tool","h":"#segment-your-text-file","p":1039},{"i":1043,"t":"Generate the snapshot file","u":"/1.0.0/creator-guide/knowledge/web-tool","h":"#generate-the-snapshot-file","p":1039},{"i":1045,"t":"Update the node config","u":"/1.0.0/creator-guide/knowledge/web-tool","h":"#update-the-node-config","p":1039},{"i":1049,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/gpt-planner","h":"#prerequisites","p":1047},{"i":1051,"t":"Run the agent","u":"/1.0.0/user-guide/apps/gpt-planner","h":"#run-the-agent","p":1047},{"i":1055,"t":"Prerequisites","u":"/1.0.0/user-guide/apps/flowiseai","h":"#prerequisites","p":1053},{"i":1057,"t":"Start a FlowiseAI server","u":"/1.0.0/user-guide/apps/flowiseai","h":"#start-a-flowiseai-server","p":1053},{"i":1059,"t":"Build a documents QnA chatbot","u":"/1.0.0/user-guide/apps/flowiseai","h":"#build-a-documents-qna-chatbot","p":1053},{"i":1061,"t":"Get the Flowise Docs QnA template","u":"/1.0.0/user-guide/apps/flowiseai","h":"#get-the-flowise-docs-qna-template","p":1053},{"i":1063,"t":"Connect the chat model API","u":"/1.0.0/user-guide/apps/flowiseai","h":"#connect-the-chat-model-api","p":1053},{"i":1065,"t":"Connect the embedding model API","u":"/1.0.0/user-guide/apps/flowiseai","h":"#connect-the-embedding-model-api","p":1053},{"i":1067,"t":"Set up your documents","u":"/1.0.0/user-guide/apps/flowiseai","h":"#set-up-your-documents","p":1053},{"i":1069,"t":"Give it a try","u":"/1.0.0/user-guide/apps/flowiseai","h":"#give-it-a-try","p":1053},{"i":1071,"t":"More examples","u":"/1.0.0/user-guide/apps/flowiseai","h":"#more-examples","p":1053},{"i":1075,"t":"Next steps:","u":"/1.0.0/intro","h":"#next-steps","p":1073},{"i":1076,"t":"Users","u":"/1.0.0/intro","h":"#users","p":1073},{"i":1078,"t":"Node operators","u":"/1.0.0/intro","h":"#node-operators","p":1073},{"i":1080,"t":"Domain 
operators","u":"/1.0.0/intro","h":"#domain-operators","p":1073},{"i":1082,"t":"Creators","u":"/1.0.0/intro","h":"#creators","p":1073}],"index":{"version":"2.3.9","fields":["t"],"fieldVectors":[["t/557",[0,4.314,1,2.629,2,3.58,3,3.044]],["t/559",[2,2.877,4,3.467,5,2.877,6,3.111,7,3.467,8,2.563]],["t/561",[9,5.708,10,5.123]],["t/563",[3,3.468,11,4.41,12,4.914]],["t/565",[5,4.078,6,4.41,13,4.914]],["t/569",[14,3.304]],["t/571",[15,2.995,16,3.094,17,4.078]],["t/573",[1,2.629,16,2.716,18,3.189,19,2.812]],["t/575",[20,5.307]],["t/577",[1,2.995,16,3.094,19,3.203]],["t/579",[21,4.028,22,2.713]],["t/583",[23,2.563,24,3.111,25,3.111,26,3.467,27,2.447,28,3.467]],["t/585",[29,4.45,30,4.22]],["t/587",[1,2.113,10,3.111,24,3.111,25,3.111,31,3.467,32,2.877]],["t/589",[33,6.808]],["t/591",[34,6.808]],["t/597",[35,3.844,36,3.844,37,3.844,38,2.997,39,2.343]],["t/599",[1,2.629,3,3.044,38,3.363,39,2.629]],["t/603",[14,3.304]],["t/605",[15,2.995,16,3.094,17,4.078]],["t/607",[1,2.629,16,2.716,18,3.189,19,2.812]],["t/609",[20,5.307]],["t/611",[1,2.995,16,3.094,19,3.203]],["t/613",[21,4.028,22,2.713]],["t/617",[14,3.304]],["t/619",[15,2.995,16,3.094,17,4.078]],["t/621",[1,2.629,16,2.716,18,3.189,19,2.812]],["t/623",[20,5.307]],["t/625",[1,2.995,16,3.094,19,3.203]],["t/627",[21,4.028,22,2.713]],["t/631",[38,2.702,39,3.211,40,2.702,41,3.467,42,3.467]],["t/632",[40,3.83,43,3.094,44,4.914]],["t/634",[40,3.83,45,3.203,46,4.914]],["t/636",[1,2.629,3,3.044,38,3.363,39,2.629]],["t/640",[22,2.05,47,4.314,48,3.58,49,2.92]],["t/643",[50,6.808]],["t/645",[51,5.651]],["t/647",[52,3.327,53,4.914,54,4.914]],["t/649",[55,3.594,56,2.77]],["t/651",[55,3.594,57,5.708]],["t/653",[55,3.594,58,5.708]],["t/655",[59,4.314,60,4.314,61,3.871,62,4.314]],["t/657",[63,6.11]],["t/661",[32,4.078,64,4.914,65,3.203]],["t/663",[66,4.737,67,5.708]],["t/665",[8,4.22,68,5.123]],["t/667",[2,4.078,29,3.83,68,4.41]],["t/669",[69,5.708,70,5.708]],["t/671",[21,4.028,22,2.713]],["t/675",[71,6.808]],["t/677",[72,5.651]],["t/679",[73,6.808]],["t/681",[15,4.15]],["t/683",[74,6.11]],["t/685",[66,5.651]],["t/687",[29,5.307]],["t/691",[14,3.304]],["t/693",[56,2.77,75,3.279]],["t/695",[21,4.028,22,2.713]],["t/699",[75,3.912]],["t/701",[55,2.42,56,1.866,72,3.19,75,2.209,76,3.844]],["t/703",[55,2.42,56,1.866,72,3.19,75,2.209,77,3.844]],["t/705",[55,2.716,56,2.093,78,3.871,79,4.314]],["t/707",[80,6.808]],["t/709",[75,3.279,81,5.708]],["t/711",[20,3.83,75,2.823,82,4.914]],["t/715",[56,2.77,83,5.708]],["t/717",[56,1.866,84,3.844,85,5.109,86,3.844]],["t/719",[49,3.864,87,4.737]],["t/721",[22,1.648,49,2.347,56,1.682,87,2.877,88,3.467,89,3.111]],["t/723",[22,1.5,49,3.319,61,2.833,87,2.62,89,2.833,90,1.565]],["t/725",[91,5.708,92,5.708]],["t/729",[93,6.808]],["t/731",[94,6.11]],["t/733",[95,6.11]],["t/735",[96,6.808]],["t/739",[94,3.111,97,1.758,98,3.111,99,3.467,100,3.111,101,3.111]],["t/741",[95,3.871,97,2.187,100,3.871,101,3.871]],["t/745",[15,3.479,102,5.708]],["t/747",[15,2.995,74,4.41,103,4.914]],["t/749",[56,2.385,104,4.41,105,4.41]],["t/751",[56,2.385,85,4.41,105,4.41]],["t/753",[23,3.189,56,2.093,106,3.871,107,3.58]],["t/757",[108,3.279,109,5.708]],["t/759",[110,4.914,111,4.914,112,4.914]],["t/767",[113,4.914,114,4.914,115,4.914]],["t/768",[43,2.716,75,2.478,98,3.871,116,4.314]],["t/770",[45,2.812,75,2.478,117,3.871,118,4.314]],["t/772",[119,5.123,120,5.708]],["t/775",[117,3.871,121,4.314,122,4.314,123,3.363]],["t/777",[18,2.563,19,2.26,124,2.563,125,3.467,126,3.467,127,3.467]],["t/779",[15,1.632,56,1.3,108,1.539,124,1.98,128,2.088,129,2.678,130,2.678,131,2.678,13
2,2.678]],["t/781",[39,2.218,43,1.374,52,1.477,123,1.701,124,1.613,133,2.182,134,2.182,135,2.182,136,2.182,137,2.182,138,2.182]],["t/783",[18,3.189,124,3.189,139,4.314,140,4.314]],["t/785",[39,2.995,128,3.83,141,4.914]],["t/787",[39,2.629,52,2.92,124,3.189,128,3.363]],["t/789",[39,2.343,52,2.603,128,2.997,142,3.844,143,3.844]],["t/791",[75,2.817,108,1.814,144,3.157,145,3.157,146,3.157,147,3.157]],["t/795",[148,6.808]],["t/797",[149,4.914,150,4.914,151,4.914]],["t/801",[23,2.842,48,3.19,90,1.906,152,3.844,153,3.844]],["t/803",[108,2.823,154,4.914,155,4.41]],["t/807",[14,3.304]],["t/809",[65,3.721,90,2.83]],["t/811",[90,2.83,97,2.894]],["t/813",[43,3.594,156,4.028]],["t/815",[45,3.721,156,4.028]],["t/817",[156,4.028,157,3.864]],["t/819",[156,4.028,158,5.708]],["t/821",[156,4.028,159,5.708]],["t/825",[8,3.189,51,3.58,90,2.139,160,2.629]],["t/827",[27,2.045,43,1.825,90,1.437,97,1.469,157,1.962,160,1.766,161,2.259,162,2.259]],["t/829",[22,1.273,27,1.89,43,2.71,55,1.686,56,1.3,97,1.358,157,1.813,162,2.088]],["t/831",[22,1.183,27,1.757,43,1.567,45,1.623,90,1.234,97,1.262,157,1.685,160,1.517,162,1.941,163,2.066]],["t/833",[45,2.989,90,1.437,97,1.469,160,1.766,161,2.259,164,2.405,165,2.405]],["t/835",[22,1.273,43,1.686,45,2.806,55,1.686,56,1.3,97,1.358,164,2.223,165,2.223]],["t/837",[22,1.105,45,3.193,90,1.153,97,1.945,160,1.417,163,1.93,164,1.93,165,1.93]],["t/839",[30,1.719,90,1.153,97,1.179,157,2.598,160,1.417,161,1.813,166,1.93,167,1.93,168,2.325,169,2.325]],["t/841",[22,1.183,43,1.567,55,1.567,56,1.208,97,1.262,157,2.747,166,2.066,167,2.066,170,2.234]],["t/843",[22,1.037,45,1.422,90,1.082,97,1.106,157,2.464,160,1.33,163,1.811,166,1.811,167,1.811,170,1.958,171,2.182]],["t/845",[160,2.995,172,4.914,173,4.914]],["t/847",[63,6.11]],["t/851",[22,3.236]],["t/855",[14,3.304]],["t/857",[90,2.437,97,2.491,161,3.83]],["t/859",[90,2.83,108,3.279]],["t/861",[104,5.123,174,5.708]],["t/865",[14,3.304]],["t/867",[75,3.279,175,5.123]],["t/869",[65,3.721,175,5.123]],["t/871",[108,3.279,176,4.22]],["t/875",[14,3.304]],["t/877",[75,3.279,177,4.737]],["t/879",[65,3.721,177,4.737]],["t/881",[108,3.279,176,4.22]],["t/885",[22,3.236]],["t/889",[14,3.304]],["t/891",[65,3.721,178,5.123]],["t/893",[108,3.279,178,5.123]],["t/896",[51,5.651]],["t/898",[179,6.808]],["t/899",[180,6.11]],["t/901",[3,4.805]],["t/903",[11,6.11]],["t/905",[30,5.033]],["t/907",[56,2.77,181,5.708]],["t/909",[182,5.708,183,5.123]],["t/913",[14,3.304]],["t/915",[15,2.995,184,4.41,185,4.41]],["t/917",[23,2.842,186,2.997,187,3.844,188,3.844,189,3.844]],["t/919",[190,5.123,191,4.737]],["t/923",[14,3.304]],["t/925",[90,2.83,97,2.894]],["t/929",[123,3.83,192,4.078,193,4.914]],["t/931",[56,2.093,123,3.363,192,3.58,194,3.363]],["t/935",[22,3.236]],["t/939",[14,3.304]],["t/941",[90,2.83,97,2.894]],["t/945",[22,3.236]],["t/949",[14,3.304]],["t/951",[15,2.629,52,2.92,195,3.58,196,4.314]],["t/953",[52,2.603,108,2.209,186,2.997,195,3.19,197,3.844]],["t/955",[40,2.461,52,2.137,108,1.814,195,2.62,198,3.157,199,3.157,200,3.157]],["t/959",[14,3.304]],["t/961",[107,3.19,176,2.842,201,3.19,202,3.19,203,3.844]],["t/963",[75,2.209,107,3.19,176,2.842,201,3.19,202,3.19]],["t/965",[65,3.721,176,4.22]],["t/967",[65,3.203,201,4.078,204,4.914]],["t/969",[108,3.279,155,5.123]],["t/970",[177,4.737,205,4.22]],["t/972",[206,6.808]],["t/974",[207,4.914,208,4.914,209,4.914]],["t/976",[210,4.914,211,4.914,212,4.914]],["t/978",[213,5.123,214,5.123]],["t/980",[191,4.737,215,5.708]],["t/984",[14,3.304]],["t/986",[90,2.83,97,2.894]],["t/990",[216,5.123,217,5.708]],["t/992",[160,2.995,
216,4.41,218,4.914]],["t/994",[160,4.15]],["t/998",[29,3.83,186,3.83,219,4.914]],["t/1000",[192,4.078,194,3.83,220,4.914]],["t/1004",[14,3.304]],["t/1006",[65,3.721,221,5.123]],["t/1008",[108,3.279,221,5.123]],["t/1012",[48,4.078,49,3.327,222,4.914]],["t/1013",[8,3.633,27,3.468,162,3.83]],["t/1015",[205,2.842,223,3.45,224,3.844,225,3.844,226,3.844]],["t/1017",[106,3.871,205,3.189,227,4.314,228,4.314]],["t/1019",[202,3.58,205,3.189,223,3.871,229,4.314]],["t/1021",[90,2.437,183,4.41,214,4.41]],["t/1022",[230,6.11]],["t/1024",[230,5.123,231,5.708]],["t/1026",[8,3.633,49,3.327,232,4.914]],["t/1027",[27,4.028,233,5.708]],["t/1029",[234,5.123,235,5.708]],["t/1031",[234,5.123,236,5.708]],["t/1035",[14,3.304]],["t/1037",[22,3.236]],["t/1041",[39,2.995,205,3.633,237,4.914]],["t/1043",[19,3.203,39,2.995,213,4.41]],["t/1045",[56,2.385,66,4.078,78,4.41]],["t/1049",[14,3.304]],["t/1051",[90,2.83,97,2.894]],["t/1055",[14,3.304]],["t/1057",[15,2.995,184,4.41,185,4.41]],["t/1059",[23,3.189,186,3.363,238,3.871,239,3.871]],["t/1061",[239,3.871,240,4.314,241,4.314,242,4.314]],["t/1063",[30,3.189,180,3.871,194,3.363,243,3.871]],["t/1065",[3,3.044,30,3.189,194,3.363,243,3.871]],["t/1067",[32,4.078,238,4.41,244,4.914]],["t/1069",[190,5.123,191,4.737]],["t/1071",[119,5.123,156,4.028]],["t/1075",[21,4.028,22,2.713]],["t/1076",[5,5.651]],["t/1078",[56,2.77,245,5.123]],["t/1080",[49,3.864,245,5.123]],["t/1082",[246,6.808]]],"invertedIndex":[["04",{"_index":115,"t":{"767":{"position":[[10,2]]}}}],["1",{"_index":43,"t":{"632":{"position":[[6,1]]},"768":{"position":[[0,1]]},"781":{"position":[[32,1]]},"813":{"position":[[8,1]]},"827":{"position":[[5,1]]},"829":{"position":[[5,1],[7,1]]},"831":{"position":[[5,1]]},"835":{"position":[[7,1]]},"841":{"position":[[7,1]]}}}],["128k",{"_index":170,"t":{"841":{"position":[[29,4]]},"843":{"position":[[68,4]]}}}],["2",{"_index":45,"t":{"634":{"position":[[6,1]]},"770":{"position":[[0,1]]},"815":{"position":[[8,1]]},"831":{"position":[[7,1]]},"833":{"position":[[5,1],[46,1]]},"835":{"position":[[5,1],[21,1]]},"837":{"position":[[5,1],[7,1],[58,1]]},"843":{"position":[[7,1]]}}}],["22",{"_index":114,"t":{"767":{"position":[[7,2]]}}}],["27b",{"_index":165,"t":{"833":{"position":[[48,3]]},"835":{"position":[[23,3]]},"837":{"position":[[60,3]]}}}],["3",{"_index":157,"t":{"817":{"position":[[8,1]]},"827":{"position":[[46,1]]},"829":{"position":[[22,1]]},"831":{"position":[[51,1]]},"839":{"position":[[5,1],[44,1]]},"841":{"position":[[5,1],[20,1]]},"843":{"position":[[5,1],[59,1]]}}}],["3b",{"_index":233,"t":{"1027":{"position":[[6,2]]}}}],["4",{"_index":158,"t":{"819":{"position":[[8,1]]}}}],["5",{"_index":159,"t":{"821":{"position":[[8,1]]}}}],["72b",{"_index":236,"t":{"1031":{"position":[[5,3]]}}}],["7b",{"_index":235,"t":{"1029":{"position":[[5,2]]}}}],["8080",{"_index":131,"t":{"779":{"position":[[52,4]]}}}],["8b",{"_index":162,"t":{"827":{"position":[[48,2]]},"829":{"position":[[24,2]]},"831":{"position":[[53,2]]},"1013":{"position":[[11,2]]}}}],["abstract",{"_index":50,"t":{"643":{"position":[[0,8]]}}}],["action",{"_index":211,"t":{"976":{"position":[[8,6]]}}}],["advanc",{"_index":154,"t":{"803":{"position":[[0,8]]}}}],["agent",{"_index":90,"t":{"723":{"position":[[35,5]]},"801":{"position":[[14,5]]},"809":{"position":[[14,5]]},"811":{"position":[[8,5]]},"825":{"position":[[36,5]]},"827":{"position":[[28,6]]},"831":{"position":[[29,5]]},"833":{"position":[[28,6]]},"837":{"position":[[29,5]]},"839":{"position":[[28,6]]},"843":{"position":[[39,5]]},"857":{"position":[[13,5]]},"859"
:{"position":[[8,5]]},"925":{"position":[[8,5]]},"941":{"position":[[8,5]]},"986":{"position":[[8,5]]},"1021":{"position":[[17,6]]},"1051":{"position":[[8,5]]}}}],["agentkit",{"_index":151,"t":{"797":{"position":[[19,8]]}}}],["ai",{"_index":61,"t":{"655":{"position":[[26,2]]},"723":{"position":[[32,2]]}}}],["altern",{"_index":232,"t":{"1026":{"position":[[0,11]]}}}],["answer",{"_index":31,"t":{"587":{"position":[[22,6]]}}}],["api",{"_index":194,"t":{"931":{"position":[[16,3]]},"1000":{"position":[[7,3]]},"1063":{"position":[[23,3]]},"1065":{"position":[[28,3]]}}}],["ask",{"_index":9,"t":{"561":{"position":[[0,3]]}}}],["asset",{"_index":62,"t":{"655":{"position":[[29,6]]}}}],["assist",{"_index":214,"t":{"978":{"position":[[8,10]]},"1021":{"position":[[7,9]]}}}],["aw",{"_index":100,"t":{"739":{"position":[[30,3]]},"741":{"position":[[19,3]]}}}],["base",{"_index":29,"t":{"585":{"position":[[8,4]]},"667":{"position":[[19,4]]},"687":{"position":[[0,4]]},"998":{"position":[[4,5]]}}}],["bind",{"_index":83,"t":{"715":{"position":[[0,4]]}}}],["bot",{"_index":150,"t":{"797":{"position":[[11,3]]}}}],["build",{"_index":23,"t":{"583":{"position":[[0,5]]},"753":{"position":[[0,5]]},"801":{"position":[[0,5]]},"917":{"position":[[0,5]]},"1059":{"position":[[0,5]]}}}],["case",{"_index":155,"t":{"803":{"position":[[13,4]]},"969":{"position":[[4,5]]}}}],["chang",{"_index":105,"t":{"749":{"position":[[5,7]]},"751":{"position":[[0,6]]}}}],["chat",{"_index":180,"t":{"899":{"position":[[0,4]]},"1063":{"position":[[12,4]]}}}],["chatbot",{"_index":186,"t":{"917":{"position":[[8,7]]},"953":{"position":[[20,7]]},"998":{"position":[[10,7]]},"1059":{"position":[[22,7]]}}}],["check",{"_index":209,"t":{"974":{"position":[[21,5]]}}}],["cli",{"_index":82,"t":{"711":{"position":[[0,3]]}}}],["client",{"_index":198,"t":{"955":{"position":[[20,6]]}}}],["clone",{"_index":171,"t":{"843":{"position":[[9,5]]}}}],["code",{"_index":183,"t":{"909":{"position":[[7,5]]},"1021":{"position":[[0,6]]}}}],["codegpt",{"_index":175,"t":{"867":{"position":[[8,7]]},"869":{"position":[[10,7]]}}}],["coder",{"_index":230,"t":{"1022":{"position":[[0,5]]},"1024":{"position":[[5,5]]}}}],["collect",{"_index":18,"t":{"573":{"position":[[18,10]]},"607":{"position":[[18,10]]},"621":{"position":[[18,10]]},"777":{"position":[[23,10]]},"783":{"position":[[29,10]]}}}],["compon",{"_index":59,"t":{"655":{"position":[[0,9]]}}}],["conclus",{"_index":63,"t":{"657":{"position":[[0,10]]},"847":{"position":[[0,10]]}}}],["config",{"_index":66,"t":{"663":{"position":[[4,6]]},"685":{"position":[[0,6]]},"1045":{"position":[[16,6]]}}}],["configur",{"_index":65,"t":{"661":{"position":[[8,14]]},"809":{"position":[[0,9]]},"869":{"position":[[0,9]]},"879":{"position":[[0,9]]},"891":{"position":[[0,9]]},"965":{"position":[[0,9]]},"967":{"position":[[0,9]]},"1006":{"position":[[0,9]]}}}],["connect",{"_index":243,"t":{"1063":{"position":[[0,7]]},"1065":{"position":[[0,7]]}}}],["content",{"_index":37,"t":{"597":{"position":[[14,7]]}}}],["context",{"_index":169,"t":{"839":{"position":[[58,7]]}}}],["continu",{"_index":177,"t":{"877":{"position":[[8,8]]},"879":{"position":[[10,8]]},"970":{"position":[[5,12]]}}}],["convert",{"_index":41,"t":{"631":{"position":[[9,7]]}}}],["cpp",{"_index":28,"t":{"583":{"position":[[39,3]]}}}],["cpu",{"_index":95,"t":{"733":{"position":[[0,3]]},"741":{"position":[[10,3]]}}}],["creat",{"_index":1,"t":{"557":{"position":[[13,8]]},"573":{"position":[[0,6]]},"577":{"position":[[0,6]]},"587":{"position":[[0,6]]},"599":{"position":[[0,6]]},"607":{"position
":[[0,6]]},"611":{"position":[[0,6]]},"621":{"position":[[0,6]]},"625":{"position":[[0,6]]},"636":{"position":[[0,6]]}}}],["creator",{"_index":246,"t":{"1082":{"position":[[0,8]]}}}],["cuda",{"_index":117,"t":{"770":{"position":[[14,4]]},"775":{"position":[[23,4]]}}}],["current",{"_index":79,"t":{"705":{"position":[[11,7]]}}}],["cursor",{"_index":178,"t":{"891":{"position":[[10,6]]},"893":{"position":[[4,6]]}}}],["custom",{"_index":69,"t":{"669":{"position":[[0,9]]}}}],["databas",{"_index":17,"t":{"571":{"position":[[15,8]]},"605":{"position":[[15,8]]},"619":{"position":[[15,8]]}}}],["decentr",{"_index":54,"t":{"647":{"position":[[16,16]]}}}],["default",{"_index":140,"t":{"783":{"position":[[21,7]]}}}],["demo",{"_index":161,"t":{"827":{"position":[[0,4]]},"833":{"position":[[0,4]]},"839":{"position":[[0,4]]},"857":{"position":[[8,4]]}}}],["deni",{"_index":145,"t":{"791":{"position":[[11,6]]}}}],["devic",{"_index":86,"t":{"717":{"position":[[25,6]]}}}],["directori",{"_index":138,"t":{"781":{"position":[[83,9]]}}}],["doc",{"_index":241,"t":{"1061":{"position":[[16,4]]}}}],["document",{"_index":238,"t":{"1059":{"position":[[8,9]]},"1067":{"position":[[12,9]]}}}],["domain",{"_index":49,"t":{"640":{"position":[[26,6]]},"719":{"position":[[7,6]]},"721":{"position":[[16,6]]},"723":{"position":[[16,6],[41,7]]},"1012":{"position":[[12,7]]},"1026":{"position":[[16,7]]},"1080":{"position":[[0,6]]}}}],["driver",{"_index":116,"t":{"768":{"position":[[21,6]]}}}],["eliza",{"_index":153,"t":{"801":{"position":[[25,5]]}}}],["embed",{"_index":3,"t":{"557":{"position":[[32,10]]},"563":{"position":[[17,10]]},"599":{"position":[[7,10]]},"636":{"position":[[7,10]]},"901":{"position":[[0,9]]},"1065":{"position":[[12,9]]}}}],["enabl",{"_index":99,"t":{"739":{"position":[[22,7]]}}}],["endpoint",{"_index":179,"t":{"898":{"position":[[0,9]]}}}],["environ",{"_index":217,"t":{"990":{"position":[[12,11]]}}}],["error",{"_index":128,"t":{"779":{"position":[[33,5]]},"785":{"position":[[9,5]]},"787":{"position":[[30,5]]},"789":{"position":[[26,5]]}}}],["evalu",{"_index":172,"t":{"845":{"position":[[0,10]]}}}],["exampl",{"_index":156,"t":{"813":{"position":[[0,7]]},"815":{"position":[[0,7]]},"817":{"position":[[0,7]]},"819":{"position":[[0,7]]},"821":{"position":[[0,7]]},"1071":{"position":[[5,8]]}}}],["exit",{"_index":132,"t":{"779":{"position":[[68,4]]}}}],["extract",{"_index":210,"t":{"976":{"position":[[0,7]]}}}],["fail",{"_index":124,"t":{"777":{"position":[[0,6]]},"779":{"position":[[0,6]]},"781":{"position":[[13,6]]},"783":{"position":[[0,6]]},"787":{"position":[[5,6]]}}}],["file",{"_index":39,"t":{"597":{"position":[[36,4]]},"599":{"position":[[36,5]]},"631":{"position":[[23,4],[42,4]]},"636":{"position":[[36,5]]},"781":{"position":[[61,4],[75,4]]},"785":{"position":[[0,4]]},"787":{"position":[[24,4]]},"789":{"position":[[19,5]]},"1041":{"position":[[18,4]]},"1043":{"position":[[22,4]]}}}],["find",{"_index":122,"t":{"775":{"position":[[18,4]]}}}],["fine",{"_index":24,"t":{"583":{"position":[[10,4]]},"587":{"position":[[37,4]]}}}],["finetun",{"_index":33,"t":{"589":{"position":[[0,8]]}}}],["flowis",{"_index":240,"t":{"1061":{"position":[[8,7]]}}}],["flowiseai",{"_index":184,"t":{"915":{"position":[[8,9]]},"1057":{"position":[[8,9]]}}}],["gaia",{"_index":48,"t":{"640":{"position":[[21,4]]},"801":{"position":[[35,4]]},"1012":{"position":[[7,4]]}}}],["gaianet",{"_index":55,"t":{"649":{"position":[[0,7]]},"651":{"position":[[0,7]]},"653":{"position":[[0,7]]},"701":{"position":[[30,7]]},"703":{"position":[[32,7]]},"705":{"
position":[[19,7]]},"829":{"position":[[27,7]]},"835":{"position":[[27,7]]},"841":{"position":[[34,7]]}}}],["gemma",{"_index":164,"t":{"833":{"position":[[40,5]]},"835":{"position":[[15,5]]},"837":{"position":[[52,5]]}}}],["gener",{"_index":213,"t":{"978":{"position":[[0,7]]},"1043":{"position":[[0,8]]}}}],["give",{"_index":190,"t":{"919":{"position":[[0,4]]},"1069":{"position":[[0,4]]}}}],["gpt",{"_index":202,"t":{"961":{"position":[[15,3]]},"963":{"position":[[27,3]]},"1019":{"position":[[15,3]]}}}],["gptpdf",{"_index":46,"t":{"634":{"position":[[9,6]]}}}],["gpu",{"_index":94,"t":{"731":{"position":[[0,3]]},"739":{"position":[[18,3]]}}}],["grammar",{"_index":208,"t":{"974":{"position":[[13,7]]}}}],["help",{"_index":71,"t":{"675":{"position":[[0,4]]}}}],["hotkey",{"_index":204,"t":{"967":{"position":[[19,6]]}}}],["id",{"_index":85,"t":{"717":{"position":[[18,2],[32,2]]},"751":{"position":[[16,2]]}}}],["imag",{"_index":106,"t":{"753":{"position":[[13,5]]},"1017":{"position":[[8,5]]}}}],["import",{"_index":91,"t":{"725":{"position":[[0,9]]}}}],["info",{"_index":181,"t":{"907":{"position":[[13,4]]}}}],["init",{"_index":73,"t":{"679":{"position":[[0,4]]}}}],["instal",{"_index":75,"t":{"693":{"position":[[0,10]]},"699":{"position":[[0,7]]},"701":{"position":[[0,7]]},"703":{"position":[[0,7]]},"709":{"position":[[7,9]]},"711":{"position":[[20,9]]},"768":{"position":[[2,7]]},"770":{"position":[[2,7]]},"791":{"position":[[31,9],[51,7]]},"867":{"position":[[0,7]]},"877":{"position":[[0,7]]},"963":{"position":[[0,7]]}}}],["instanc",{"_index":101,"t":{"739":{"position":[[34,8]]},"741":{"position":[[23,8]]}}}],["introduct",{"_index":51,"t":{"645":{"position":[[0,12]]},"825":{"position":[[0,12]]},"896":{"position":[[0,12]]}}}],["ip",{"_index":188,"t":{"917":{"position":[[29,2]]}}}],["item",{"_index":212,"t":{"976":{"position":[[15,5]]}}}],["join",{"_index":87,"t":{"719":{"position":[[0,4]]},"721":{"position":[[9,4]]},"723":{"position":[[9,4]]}}}],["knowledg",{"_index":2,"t":{"557":{"position":[[22,9]]},"559":{"position":[[31,9]]},"667":{"position":[[9,9]]}}}],["larg",{"_index":226,"t":{"1015":{"position":[[26,5]]}}}],["latest",{"_index":76,"t":{"701":{"position":[[12,6]]}}}],["launch",{"_index":47,"t":{"640":{"position":[[9,6]]}}}],["libgomp",{"_index":134,"t":{"781":{"position":[[21,7]]}}}],["librari",{"_index":123,"t":{"775":{"position":[[28,9]]},"781":{"position":[[5,7]]},"929":{"position":[[18,7]]},"931":{"position":[[20,7]]}}}],["lifecycl",{"_index":4,"t":{"559":{"position":[[0,9]]}}}],["llama",{"_index":27,"t":{"583":{"position":[[33,5]]},"827":{"position":[[40,5]]},"829":{"position":[[16,5]]},"831":{"position":[[45,5]]},"1013":{"position":[[5,5]]},"1027":{"position":[[0,5]]}}}],["llamapars",{"_index":44,"t":{"632":{"position":[[9,10]]}}}],["llm",{"_index":8,"t":{"559":{"position":[[54,3]]},"665":{"position":[[10,3]]},"825":{"position":[[20,3]]},"1013":{"position":[[0,3]]},"1026":{"position":[[12,3]]}}}],["load",{"_index":133,"t":{"781":{"position":[[0,4]]}}}],["local",{"_index":107,"t":{"753":{"position":[[19,7]]},"961":{"position":[[9,5]]},"963":{"position":[[21,5]]}}}],["long",{"_index":168,"t":{"839":{"position":[[53,4]]}}}],["lookup",{"_index":189,"t":{"917":{"position":[[32,6]]}}}],["machin",{"_index":196,"t":{"951":{"position":[[29,7]]}}}],["maco",{"_index":143,"t":{"789":{"position":[[35,5]]}}}],["make",{"_index":104,"t":{"749":{"position":[[0,4]]},"861":{"position":[[0,4]]}}}],["manag",{"_index":88,"t":{"721":{"position":[[38,10]]}}}],["mani",{"_index":142,"t":{"789":{"position":[[9,4]]}
}}],["markdown",{"_index":38,"t":{"597":{"position":[[27,8]]},"599":{"position":[[27,8]]},"631":{"position":[[33,8]]},"636":{"position":[[27,8]]}}}],["marketplac",{"_index":60,"t":{"655":{"position":[[10,11]]}}}],["medium",{"_index":167,"t":{"839":{"position":[[46,6]]},"841":{"position":[[22,6]]},"843":{"position":[[61,6]]}}}],["merg",{"_index":34,"t":{"591":{"position":[[0,5]]}}}],["messag",{"_index":129,"t":{"779":{"position":[[39,7]]}}}],["model",{"_index":30,"t":{"585":{"position":[[13,5]]},"839":{"position":[[66,5]]},"905":{"position":[[8,5]]},"1063":{"position":[[17,5]]},"1065":{"position":[[22,5]]}}}],["more",{"_index":119,"t":{"772":{"position":[[0,4]]},"1071":{"position":[[0,4]]}}}],["network",{"_index":57,"t":{"651":{"position":[[8,7]]}}}],["next",{"_index":21,"t":{"579":{"position":[[0,4]]},"613":{"position":[[0,4]]},"627":{"position":[[0,4]]},"671":{"position":[[0,4]]},"695":{"position":[[0,4]]},"1075":{"position":[[0,4]]}}}],["nice",{"_index":111,"t":{"759":{"position":[[11,4]]}}}],["node",{"_index":56,"t":{"649":{"position":[[8,4]]},"693":{"position":[[15,4]]},"701":{"position":[[38,4]]},"703":{"position":[[40,4]]},"705":{"position":[[27,4]]},"715":{"position":[[10,4]]},"717":{"position":[[13,4]]},"721":{"position":[[33,4]]},"749":{"position":[[20,4]]},"751":{"position":[[11,4]]},"753":{"position":[[8,4]]},"779":{"position":[[20,4]]},"829":{"position":[[35,4]]},"835":{"position":[[35,4]]},"841":{"position":[[42,4]]},"907":{"position":[[8,4]]},"931":{"position":[[11,4]]},"1045":{"position":[[11,4]]},"1078":{"position":[[0,4]]}}}],["note",{"_index":92,"t":{"725":{"position":[[10,5]]}}}],["now",{"_index":215,"t":{"980":{"position":[[7,3]]}}}],["nvidia",{"_index":98,"t":{"739":{"position":[[11,6]]},"768":{"position":[[14,6]]}}}],["o",{"_index":141,"t":{"785":{"position":[[7,1]]}}}],["object",{"_index":136,"t":{"781":{"position":[[54,6]]}}}],["obsidian",{"_index":201,"t":{"961":{"position":[[0,8]]},"963":{"position":[[12,8]]},"967":{"position":[[10,8]]}}}],["open",{"_index":52,"t":{"647":{"position":[[0,4]]},"781":{"position":[[42,4]]},"787":{"position":[[15,4]]},"789":{"position":[[14,4]]},"951":{"position":[[10,4]]},"953":{"position":[[4,4]]},"955":{"position":[[4,4]]}}}],["openai",{"_index":192,"t":{"929":{"position":[[4,6]]},"931":{"position":[[4,6]]},"1000":{"position":[[0,6]]}}}],["oper",{"_index":245,"t":{"1078":{"position":[[5,9]]},"1080":{"position":[[7,9]]}}}],["option",{"_index":20,"t":{"575":{"position":[[0,7]]},"609":{"position":[[0,7]]},"623":{"position":[[0,7]]},"711":{"position":[[4,7]]}}}],["os",{"_index":96,"t":{"735":{"position":[[0,4]]}}}],["page",{"_index":89,"t":{"721":{"position":[[49,4]]},"723":{"position":[[49,4]]}}}],["pars",{"_index":35,"t":{"597":{"position":[[0,5]]}}}],["pdf",{"_index":42,"t":{"631":{"position":[[19,3]]}}}],["permiss",{"_index":144,"t":{"791":{"position":[[0,10]]}}}],["phi",{"_index":166,"t":{"839":{"position":[[40,3]]},"841":{"position":[[16,3]]},"843":{"position":[[55,3]]}}}],["plugin",{"_index":176,"t":{"871":{"position":[[8,6]]},"881":{"position":[[8,6]]},"961":{"position":[[19,6]]},"963":{"position":[[31,6]]},"965":{"position":[[14,6]]}}}],["port",{"_index":130,"t":{"779":{"position":[[47,4]]}}}],["pre",{"_index":64,"t":{"661":{"position":[[0,3]]}}}],["prepar",{"_index":216,"t":{"990":{"position":[[0,7]]},"992":{"position":[[0,7]]}}}],["prerequisit",{"_index":14,"t":{"569":{"position":[[0,13]]},"603":{"position":[[0,13]]},"617":{"position":[[0,13]]},"691":{"position":[[0,13]]},"807":{"position":[[0,13]]},"855":{"position":[[0,13]]}
,"865":{"position":[[0,13]]},"875":{"position":[[0,13]]},"889":{"position":[[0,13]]},"913":{"position":[[0,13]]},"923":{"position":[[0,13]]},"939":{"position":[[0,13]]},"949":{"position":[[0,13]]},"959":{"position":[[0,13]]},"984":{"position":[[0,13]]},"1004":{"position":[[0,13]]},"1035":{"position":[[0,13]]},"1049":{"position":[[0,13]]},"1055":{"position":[[0,13]]}}}],["prompt",{"_index":70,"t":{"669":{"position":[[10,7]]}}}],["protect",{"_index":84,"t":{"717":{"position":[[0,7]]}}}],["public",{"_index":222,"t":{"1012":{"position":[[0,6]]}}}],["python",{"_index":193,"t":{"929":{"position":[[11,6]]}}}],["qna",{"_index":239,"t":{"1059":{"position":[[18,3]]},"1061":{"position":[[21,3]]}}}],["qualiti",{"_index":173,"t":{"845":{"position":[[26,7]]}}}],["queri",{"_index":6,"t":{"559":{"position":[[20,5]]},"565":{"position":[[21,5]]}}}],["question",{"_index":10,"t":{"561":{"position":[[6,8]]},"587":{"position":[[9,8]]}}}],["quick",{"_index":102,"t":{"745":{"position":[[0,5]]}}}],["quickstart",{"_index":148,"t":{"795":{"position":[[0,10]]}}}],["qwen",{"_index":234,"t":{"1029":{"position":[[0,4]]},"1031":{"position":[[0,4]]}}}],["rag",{"_index":200,"t":{"955":{"position":[[32,3]]}}}],["re",{"_index":103,"t":{"747":{"position":[[9,2]]}}}],["realist",{"_index":227,"t":{"1017":{"position":[[15,9]]}}}],["realtim",{"_index":187,"t":{"917":{"position":[[20,8]]}}}],["recov",{"_index":125,"t":{"777":{"position":[[10,7]]}}}],["reduc",{"_index":110,"t":{"759":{"position":[[0,6]]}}}],["remov",{"_index":139,"t":{"783":{"position":[[10,6]]}}}],["replac",{"_index":220,"t":{"1000":{"position":[[11,11]]}}}],["resourc",{"_index":120,"t":{"772":{"position":[[5,9]]}}}],["respons",{"_index":13,"t":{"565":{"position":[[0,8]]}}}],["retriev",{"_index":11,"t":{"563":{"position":[[0,8]]},"903":{"position":[[0,8]]}}}],["robust",{"_index":174,"t":{"861":{"position":[[8,6]]}}}],["run",{"_index":97,"t":{"739":{"position":[[0,7]]},"741":{"position":[[0,7]]},"811":{"position":[[0,3]]},"827":{"position":[[8,7]]},"829":{"position":[[10,3]]},"831":{"position":[[9,3]]},"833":{"position":[[8,7]]},"835":{"position":[[9,3]]},"837":{"position":[[9,3],[38,3]]},"839":{"position":[[8,7]]},"841":{"position":[[10,3]]},"843":{"position":[[19,3]]},"857":{"position":[[0,3]]},"925":{"position":[[0,3]]},"941":{"position":[[0,3]]},"986":{"position":[[0,3]]},"1051":{"position":[[0,3]]}}}],["rust",{"_index":231,"t":{"1024":{"position":[[0,4]]}}}],["s",{"_index":81,"t":{"709":{"position":[[5,1]]}}}],["script",{"_index":146,"t":{"791":{"position":[[41,6]]}}}],["segment",{"_index":237,"t":{"1041":{"position":[[0,7]]}}}],["select",{"_index":68,"t":{"665":{"position":[[0,6]]},"667":{"position":[[0,6]]}}}],["server",{"_index":185,"t":{"915":{"position":[[18,6]]},"1057":{"position":[[18,6]]}}}],["set",{"_index":32,"t":{"587":{"position":[[29,3]]},"661":{"position":[[4,3]]},"1067":{"position":[[0,3]]}}}],["setup",{"_index":203,"t":{"961":{"position":[[26,5]]}}}],["share",{"_index":135,"t":{"781":{"position":[[47,6]]}}}],["side",{"_index":199,"t":{"955":{"position":[[27,4]]}}}],["similar",{"_index":12,"t":{"563":{"position":[[9,7]]}}}],["snapshot",{"_index":19,"t":{"573":{"position":[[29,8]]},"577":{"position":[[16,8]]},"607":{"position":[[29,8]]},"611":{"position":[[16,8]]},"621":{"position":[[29,8]]},"625":{"position":[[16,8]]},"777":{"position":[[34,8]]},"1043":{"position":[[13,8]]}}}],["sourc",{"_index":53,"t":{"647":{"position":[[5,6]]}}}],["sovit",{"_index":229,"t":{"1019":{"position":[[19,6]]}}}],["specif",{"_index":77,"t":{"703":{"position":[[12,8]]}
}}],["spell",{"_index":207,"t":{"974":{"position":[[0,8]]}}}],["start",{"_index":15,"t":{"571":{"position":[[0,5]]},"605":{"position":[[0,5]]},"619":{"position":[[0,5]]},"681":{"position":[[0,5]]},"745":{"position":[[6,5]]},"747":{"position":[[12,5]]},"779":{"position":[[10,5]]},"915":{"position":[[0,5]]},"951":{"position":[[0,5]]},"1057":{"position":[[0,5]]}}}],["statu",{"_index":182,"t":{"909":{"position":[[0,6]]}}}],["step",{"_index":22,"t":{"579":{"position":[[5,5]]},"613":{"position":[[5,5]]},"627":{"position":[[5,5]]},"640":{"position":[[0,5]]},"671":{"position":[[5,5]]},"695":{"position":[[5,5]]},"721":{"position":[[0,5]]},"723":{"position":[[0,5]]},"829":{"position":[[0,4]]},"831":{"position":[[0,4]]},"835":{"position":[[0,4]]},"837":{"position":[[0,4]]},"841":{"position":[[0,4]]},"843":{"position":[[0,4]]},"851":{"position":[[0,5]]},"885":{"position":[[0,5]]},"935":{"position":[[0,5]]},"945":{"position":[[0,5]]},"1037":{"position":[[0,5]]},"1075":{"position":[[5,5]]}}}],["stop",{"_index":74,"t":{"683":{"position":[[0,4]]},"747":{"position":[[0,4]]}}}],["subcommand",{"_index":67,"t":{"663":{"position":[[11,10]]}}}],["such",{"_index":137,"t":{"781":{"position":[[70,4]]}}}],["summar",{"_index":206,"t":{"972":{"position":[[0,13]]}}}],["supervis",{"_index":109,"t":{"757":{"position":[[4,9]]}}}],["supplement",{"_index":7,"t":{"559":{"position":[[41,12]]}}}],["support",{"_index":93,"t":{"729":{"position":[[0,9]]}}}],["system",{"_index":121,"t":{"775":{"position":[[4,6]]}}}],["task",{"_index":218,"t":{"992":{"position":[[25,4]]}}}],["telegram",{"_index":149,"t":{"797":{"position":[[2,8]]}}}],["templat",{"_index":242,"t":{"1061":{"position":[[25,8]]}}}],["text",{"_index":205,"t":{"970":{"position":[[0,4]]},"1015":{"position":[[9,4]]},"1017":{"position":[[0,4]]},"1019":{"position":[[0,4]]},"1041":{"position":[[13,4]]}}}],["token",{"_index":58,"t":{"653":{"position":[[8,5]]}}}],["tool",{"_index":40,"t":{"631":{"position":[[0,5]]},"632":{"position":[[0,4]]},"634":{"position":[[0,4]]},"955":{"position":[[36,4]]}}}],["toolkit",{"_index":118,"t":{"770":{"position":[[19,7]]}}}],["top",{"_index":163,"t":{"831":{"position":[[38,3]]},"837":{"position":[[45,3]]},"843":{"position":[[48,3]]}}}],["translat",{"_index":160,"t":{"825":{"position":[[24,11]]},"827":{"position":[[16,11]]},"831":{"position":[[17,11]]},"833":{"position":[[16,11]]},"837":{"position":[[17,11]]},"839":{"position":[[16,11]]},"843":{"position":[[27,11]]},"845":{"position":[[14,11]]},"992":{"position":[[13,11]]},"994":{"position":[[0,9]]}}}],["tri",{"_index":191,"t":{"919":{"position":[[10,3]]},"980":{"position":[[0,3]]},"1069":{"position":[[10,3]]}}}],["trump",{"_index":152,"t":{"801":{"position":[[8,5]]}}}],["tune",{"_index":25,"t":{"583":{"position":[[15,4]]},"587":{"position":[[42,6]]}}}],["ubuntu",{"_index":113,"t":{"767":{"position":[[0,6]]}}}],["ui",{"_index":197,"t":{"953":{"position":[[28,2]]}}}],["uninstal",{"_index":80,"t":{"707":{"position":[[0,9]]}}}],["up",{"_index":244,"t":{"1067":{"position":[[4,2]]}}}],["updat",{"_index":78,"t":{"705":{"position":[[0,6]]},"1045":{"position":[[0,6]]}}}],["url",{"_index":36,"t":{"597":{"position":[[10,3]]}}}],["us",{"_index":108,"t":{"757":{"position":[[0,3]]},"779":{"position":[[63,3]]},"791":{"position":[[23,3]]},"803":{"position":[[9,3]]},"859":{"position":[[0,3]]},"871":{"position":[[0,3]]},"881":{"position":[[0,3]]},"893":{"position":[[0,3]]},"953":{"position":[[0,3]]},"955":{"position":[[0,3]]},"969":{"position":[[0,3]]},"1008":{"position":[[0,3]]}}}],["user",{"_index":5,"t":{"559
":{"position":[[15,4]]},"565":{"position":[[16,4]]},"1076":{"position":[[0,5]]}}}],["util",{"_index":26,"t":{"583":{"position":[[20,7]]}}}],["v2",{"_index":225,"t":{"1015":{"position":[[23,2]]}}}],["valu",{"_index":112,"t":{"759":{"position":[[16,5]]}}}],["vector",{"_index":16,"t":{"571":{"position":[[8,6]]},"573":{"position":[[11,6]]},"577":{"position":[[9,6]]},"605":{"position":[[8,6]]},"607":{"position":[[11,6]]},"611":{"position":[[9,6]]},"619":{"position":[[8,6]]},"621":{"position":[[11,6]]},"625":{"position":[[9,6]]}}}],["version",{"_index":72,"t":{"677":{"position":[[0,7]]},"701":{"position":[[19,7]]},"703":{"position":[[21,7]]}}}],["vision",{"_index":228,"t":{"1017":{"position":[[25,6]]}}}],["voic",{"_index":223,"t":{"1015":{"position":[[0,5]]},"1019":{"position":[[8,5]]}}}],["wasmedg",{"_index":147,"t":{"791":{"position":[[59,8]]}}}],["web",{"_index":219,"t":{"998":{"position":[[0,3]]}}}],["webui",{"_index":195,"t":{"951":{"position":[[15,5]]},"953":{"position":[[9,5]]},"955":{"position":[[9,5]]}}}],["whisper",{"_index":224,"t":{"1015":{"position":[[15,7]]}}}],["window",{"_index":126,"t":{"777":{"position":[[46,7]]}}}],["workflow",{"_index":0,"t":{"557":{"position":[[0,8]]}}}],["wsl",{"_index":127,"t":{"777":{"position":[[54,3]]}}}],["zed",{"_index":221,"t":{"1006":{"position":[[10,3]]},"1008":{"position":[[4,3]]}}}]],"pipeline":["stemmer"]}},{"documents":[{"i":556,"t":"The LLM app requires both long-term and short-term memories. Long-term memory includes factual knowledge, historical facts, background stories etc. They are best added to the context as complete chapters instead of small chunks of text to maintain the internal consistency of the knowledge. RAG is an important technique to inject contextual knowledge into an LLM application. It improves accuracy and reduces the hallucination of LLMs. An effective RAG application combines real-time and user-specific short-term memory (chunks) with stable long-term memory (chapters) in the prompt context. Since the application's long-term memory is stable (even immutable), we package it in a vector database tightly coupled with the LLM. The client app assembles the short-term memory in the prompt and is supplemented with the long-term memory on the LLM server. We call the approach \"server-side RAG\". The long context length supported by modern LLMs are especially well-suited for long-term knowledge that are best represented by chapters of text. A Gaia node is an OpenAI compatible LLM service that is grounded by long-term knowledge on the server side. The client application can simply chat with it or provide realtime / short-term memory since the LLM is already aware of the domain or background. For example, if you ask ChatGPT the question What is Layer 2, the answer is that Layer 2 is a concept from the computer network. However, if you ask a blockchain person, they answer that Layer 2 is a way to scale the original Ethereum network. That's the difference between a generic LLM and knowledge-supplemented LLMs. We will cover the external knowledge preparation and how a knowledge-supplemented LLM completes a conversation. If you have learned how a RAG application works, go to Build a RAG application with Gaia to start building one. Create embeddings for your own knowledge as the long-term memory. Lifecycle of a user query on a knowledge-supplemented LLM. For this solution, we will use a chat model like Llama-3-8B for generating responses to the user. a text embedding model like nomic-embed-text for creating and retrieving embeddings. 
a Vector DB like Qdrant for storing embeddings.","s":"Gaia nodes with long-term knowledge","u":"/1.0.0/creator-guide/knowledge/concepts","h":"","p":555},{"i":558,"t":"The first step is to create embeddings for our knowledge base and store the embeddings in a vector DB. First of all, we split the long text into sections (i.e., chunks). All LLMs have a maximum context length. The model can't read the context if the text is too long. A common rule of thumb for a Gaia node is to keep the content of one chapter together. Remember, insert a blank line between two chunks. You can also use other algorithms to chunk your text. After chunking the document, we can convert these chunks into embeddings leveraging the embedding model. The embedding model is trained to create embeddings based on text and search for similar embeddings. We will use the latter capability when processing a user query. Additionally, we will need a vector DB to store the embeddings so that we can retrieve these embeddings quickly at any time. On a Gaia node, we will eventually get a database snapshot containing the embeddings. Check out how to create your embeddings using the Gaia web tool, from a plain text file, and from a markdown file.","s":"Workflow for creating knowledge embeddings","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#workflow-for-creating-knowledge-embeddings","p":555},{"i":560,"t":"Next, let's learn the lifecycle of a user query on a knowledge-supplemented LLM. We will take a Gaia Node with Gaia knowledge as an example.","s":"Lifecycle of a user query on a knowledge-supplemented LLM","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#lifecycle-of-a-user-query-on-a-knowledge-supplemented-llm","p":555},{"i":562,"t":"When you send a question in human language to the node, the embedding model will first convert your question to an embedding.","s":"Ask a question","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#ask-a-question","p":555},{"i":564,"t":"Then, the embedding model will search all the embeddings stored in the Qdrant vector DB and retrieve the embeddings that are similar to the question embeddings.","s":"Retrieve similar embeddings","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#retrieve-similar-embeddings","p":555},{"i":566,"t":"The embedding node will return the retrieved embeddings to the chat model. The chat model will use the retrieved embeddings plus your input question as context to finally answer your query.","s":"Response to the user query","u":"/1.0.0/creator-guide/knowledge/concepts","h":"#response-to-the-user-query","p":555},{"i":568,"t":"In this section, we will discuss how to create a vector collection snapshot for optimal retrieval of long-form text documents. The approach is to create two columns of text in a CSV file. The first column is the long-form source text from the knowledge document, such as a book chapter or a markdown section. The long-form source text is difficult to search. The second column is a \"search-friendly\" summary of the source text. It could contain a list of questions that can be answered by the first column source text. We will create a vector snapshot where each vector is computed from the summary text (second column), but the retrieved source text for that vector is from the first column. The snapshot file can then be loaded by a Gaia node as its knowledge base. We have a simple Python script to build properly formatted CSV files from a set of articles or chapters, as sketched below.
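A minimal sketch of such a script, under stated assumptions: the chapters/ input directory, the knowledge.csv output name, and the summarize() helper are hypothetical placeholders, and the actual project script may differ.

```python
# Build a two-column CSV: column 1 = long-form source text,
# column 2 = a search-friendly summary of that text.
import csv
import pathlib

def summarize(chapter: str) -> str:
    # Placeholder: in practice, this could be an LLM-generated summary or a
    # list of questions that the chapter can answer.
    lines = chapter.strip().splitlines()
    return lines[0] if lines else ""

rows = []
for path in sorted(pathlib.Path("chapters").glob("*.md")):  # hypothetical input dir
    source = path.read_text(encoding="utf-8")
    rows.append([source, summarize(source)])

with open("knowledge.csv", "w", newline="", encoding="utf-8") as f:
    csv.writer(f).writerows(rows)  # the csv module quotes embedded newlines
```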
See how it works.","s":"Knowledge base from source / summary pairs","u":"/1.0.0/creator-guide/knowledge/csv","h":"","p":567},{"i":570,"t":"Install the WasmEdge Runtime, the cross-platform LLM runtime. curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install_v2.sh | bash -s Download an embedding model. curl -LO https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf The embedding model is a special kind of LLM that turns sentences into vectors. The vectors can then be stored in a vector database and searched later. When the sentences are from a body of text that represents a knowledge domain, that vector database becomes our RAG knowledge base.","s":"Prerequisites","u":"/1.0.0/creator-guide/knowledge/csv","h":"#prerequisites","p":567},{"i":572,"t":"By default, we use Qdrant as the vector database. You can start a Qdrant instance by starting a Gaia node with a knowledge snapshot. Note: you can also start a Qdrant server using Docker. The following command starts it in the background. mkdir qdrant_storage mkdir qdrant_snapshots nohup docker run -d -p 6333:6333 -p 6334:6334 \\ -v $(pwd)/qdrant_storage:/qdrant/storage:z \\ -v $(pwd)/qdrant_snapshots:/qdrant/snapshots:z \\ qdrant/qdrant","s":"Start a vector database","u":"/1.0.0/creator-guide/knowledge/csv","h":"#start-a-vector-database","p":567},{"i":574,"t":"Delete the default collection if it exists. curl -X DELETE 'http://localhost:6333/collections/default' Create a new collection called default. Notice that it is 768 dimensions. That is the output vector size of the embedding model nomic-embed-text-v1.5. If you are using a different embedding model, you should use a dimension that fits the model. curl -X PUT 'http://localhost:6333/collections/default' \\ -H 'Content-Type: application/json' \\ --data-raw '{ \"vectors\": { \"size\": 768, \"distance\": \"Cosine\", \"on_disk\": true } }' Download a program to create embeddings from the CSV file. curl -LO https://github.com/GaiaNet-AI/embedding-tools/raw/main/csv_embed/csv_embed.wasm You can check out the Rust source code here and modify it if you need to use a different CSV layout. Next, you can run the program by passing a collection name, vector dimension, and the CSV document. The --ctx_size option matches the embedding model's context window size, which in this case is 8192 tokens, allowing it to process long sections of text. Make sure that Qdrant is running on your local machine. The model is preloaded under the name embedding. The wasm app then uses the embedding model to create the 768-dimension vectors from paris.csv and saves them into the default collection. curl -LO https://huggingface.co/datasets/gaianet/paris/raw/main/paris.csv wasmedge --dir .:. \\ --nn-preload embedding:GGML:AUTO:nomic-embed-text-v1.5.f16.gguf \\ csv_embed.wasm embedding default 768 paris.csv --ctx_size 8192","s":"Create the vector collection snapshot","u":"/1.0.0/creator-guide/knowledge/csv","h":"#create-the-vector-collection-snapshot","p":567},{"i":576,"t":"You can pass the following options to the program. Using -c or --ctx_size to specify the context size of the input. This defaults to 512. Using -m or --maximum_context_length to specify a context length in the CLI argument. This truncates and warns about each text segment that exceeds the context length. Using -s or --start_vector_id to specify the start vector ID in the CLI argument.
This will allow us to run this app multiple times on multiple documents on the same vector collection. Example: the same as the above example, but appending the London guide to the end of an existing collection, starting from index 42. wasmedge --dir .:. \\ --nn-preload embedding:GGML:AUTO:nomic-embed-text-v1.5.f16.gguf \\ csv_embed.wasm embedding default 768 london.csv -c 8192 -l 1 -s 42","s":"Options","u":"/1.0.0/creator-guide/knowledge/csv","h":"#options","p":567},{"i":578,"t":"You can create a snapshot of the collection, which can be shared and loaded into a different Qdrant database. You can find the snapshot file in the qdrant_snapshots directory, or the ~/gaianet/qdrant/snapshots directory in the Gaia node. curl -X POST 'http://localhost:6333/collections/default/snapshots' We also recommend compressing the snapshot file. tar czvf my.snapshot.tar.gz my.snapshot Finally, upload the my.snapshot.tar.gz file to Huggingface so that the Gaia node can download and use it.","s":"Create a vector snapshot","u":"/1.0.0/creator-guide/knowledge/csv","h":"#create-a-vector-snapshot","p":567},{"i":580,"t":"Start a new Gaia node Customize the Gaia node Have fun!","s":"Next steps","u":"/1.0.0/creator-guide/knowledge/csv","h":"#next-steps","p":567},{"i":582,"t":"The popular llama.cpp tool comes with a finetune utility. It works well on CPUs! This fine-tune guide is reproduced with permission from Tony Yuan's Finetune an open-source LLM for the chemistry subject project.","s":"llama.cpp","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"","p":581},{"i":584,"t":"The finetune utility in llama.cpp can work with quantized GGUF files on CPUs, dramatically reducing the hardware requirements and expenses of fine-tuning LLMs. Check out and download the llama.cpp source code. git clone https://github.com/ggerganov/llama.cpp cd llama.cpp Build the llama.cpp binary. mkdir build cd build cmake .. cmake --build . --config Release If you have an NVIDIA GPU and the CUDA toolkit installed, you should build llama.cpp with CUDA support. mkdir build cd build cmake .. -DLLAMA_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc cmake --build . --config Release","s":"Build the fine-tune utility from llama.cpp","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#build-the-fine-tune-utility-from-llamacpp","p":581},{"i":586,"t":"We are going to use Meta's Llama2 chat 13B model as the base model. Note that we are using a Q5 quantized GGUF model file directly to save computing resources. You can use any of the Llama2 compatible GGUF models on Hugging Face. cd .. # change to the llama.cpp directory cd models/ curl -LO https://huggingface.co/gaianet/Llama-2-13B-Chat-GGUF/resolve/main/llama-2-13b-chat.Q5_K_M.gguf","s":"Get the base model","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#get-the-base-model","p":581},{"i":588,"t":"Next, we came up with 1700+ pairs of QAs for the chemistry subject. They look like the following in a CSV file. Question Answer What is unique about hydrogen? It's the most abundant element in the universe, making up over 75% of all matter. What is the main component of Jupiter? Hydrogen is the main component of Jupiter and the other gas giant planets. Can hydrogen be used as fuel? Yes, hydrogen is used as rocket fuel. It can also power fuel cells to generate electricity. What is mercury's atomic number? The atomic number of mercury is 80. What is Mercury? Mercury is a silver-colored metal that is liquid at room temperature. It has an atomic number of 80 on the periodic table. It is toxic to humans.
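Rows like these are eventually rendered into the Llama2 chat prompt format by the conversion script described next. A minimal sketch of that conversion, with loudly hypothetical names: qa.csv is a placeholder input file, and SAMPLE_START stands in for whatever marker string you pass to the finetune utility's --sample-start option.

```python
# Convert (question, answer) CSV rows into Llama2 chat template samples.
import csv

SAMPLE_START = "<SFT>"  # hypothetical marker; must match the --sample-start value

with open("qa.csv", newline="", encoding="utf-8") as src, \
        open("train.txt", "w", encoding="utf-8") as out:
    for question, answer in csv.reader(src):
        # Llama2 chat template: the instruction goes inside [INST] ... [/INST]
        out.write(f"{SAMPLE_START}[INST] {question.strip()} [/INST] {answer.strip()}\n")
```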
We used GPT-4 to help us come up with many of these QAs. Then, we wrote a Python script to convert each row in the CSV file into a sample QA in the Llama2 chat template format. Notice that each QA pair starts with a marker (the string passed to --sample-start) as an indicator for the fine-tune program to start a new sample. The resulting train.txt file can now be used in fine-tuning. Put the train.txt file in the llama.cpp/models directory with the GGUF base model.","s":"Create a question and answer set for fine-tuning","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#create-a-question-and-answer-set-for-fine-tuning","p":581},{"i":590,"t":"Use the following command to start the fine-tuning process on your CPUs. We put it in the background so that it can run continuously. It could take several days or even a couple of weeks depending on how many CPUs you have. nohup ../build/bin/finetune --model-base llama-2-13b-chat.Q5_K_M.gguf --lora-out lora.bin --train-data train.txt --sample-start '' --adam-iter 1024 & You can check the process every few hours in the nohup.out file. It will report the loss for each iteration. You can stop the process when the loss goes consistently under 0.1. Note 1 If you have multiple CPUs (or CPU cores), you can speed up the fine-tuning process by adding a -t parameter to the above command to use more threads. For example, if you have 60 CPU cores, you could do -t 60 to use all of them. Note 2 If your fine-tuning process is interrupted, you can restart it from checkpoint-250.gguf. The next file it outputs is checkpoint-260.gguf. nohup ../build/bin/finetune --model-base llama-2-13b-chat.Q5_K_M.gguf --checkpoint-in checkpoint-250.gguf --lora-out lora.bin --train-data train.txt --sample-start '' --adam-iter 1024 &","s":"Finetune!","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#finetune","p":581},{"i":592,"t":"The fine-tuning process updates several layers of the LLM's neural network. Those updated layers are saved in a file called lora.bin, and you can now merge them back into the base LLM to create the new fine-tuned LLM. ../build/bin/export-lora --model-base llama-2-13b-chat.Q5_K_M.gguf --lora lora.bin --model-out chemistry-assistant-13b-q5_k_m.gguf The result is this file. curl -LO https://huggingface.co/juntaoyuan/chemistry-assistant-13b/resolve/main/chemistry-assistant-13b-q5_k_m.gguf Note 3 If you want to use a checkpoint to generate a lora.bin file, use the following command. This is needed when you believe the final lora.bin is overfitted. ../build/bin/finetune --model-base llama-2-13b-chat.Q5_K_M.gguf --checkpoint-in checkpoint-250.gguf --only-write-lora --lora-out lora.bin","s":"Merge","u":"/1.0.0/creator-guide/finetune/llamacpp","h":"#merge","p":581},{"i":594,"t":"You could fine-tune an open-source LLM to: Teach it to follow conversations. Teach it to respect and follow instructions. Make it refuse to answer certain questions. Give it a specific \"speaking\" style. Make it respond in certain formats (e.g., JSON). Have it focus on a specific domain area. Teach it certain knowledge. To do that, you need to create a set of question and answer pairs to show the model the prompt and the expected response. Then, you can use a fine-tuning tool to perform the training and make the model respond with the expected answer for each question.","s":"Fine-tune LLMs","u":"/1.0.0/creator-guide/finetune/intro","h":"","p":593},{"i":596,"t":"In this section, we will discuss how to create a vector collection snapshot from a Web URL. First, we will parse the URL to a structured markdown file.
Then, we will follow the steps from Knowledge base from a markdown file to create embeddings for your URL.","s":"Knowledge base from a URL","u":"/1.0.0/creator-guide/knowledge/firecrawl","h":"","p":595},{"i":598,"t":"Firecrawl can crawl and convert any website into LLM-ready markdown or structured data. It also supports crawling a URL and all accessible subpages. To use Firecrawl, you need to sign up on Firecrawl and get an API key. First, install the dependencies. We are assuming that you already have Node.JS 20+ installed. git clone https://github.com/JYC0413/firecrawl-integration.git cd firecrawl-integration npm install Then, export the API key in the terminal. export FIRECRAWL_KEY=\"your_api_key_here\" Next, we can use the following command to run the service. node crawlWebToMd.js After the application is running successfully, you will see a prompt appear in the terminal, where you can type your URL. Here we have two choices. Multiple pages: input your link with a / at the end, and the program will crawl and convert the page and all its subpages into one single markdown file. This approach will consume a lot of API tokens. One single page: input your link without a / at the end, and the program will crawl and convert only the current page into one single markdown file. The output markdown file, named output.md, will be located in this folder.","s":"Parse the URL content to a markdown file","u":"/1.0.0/creator-guide/knowledge/firecrawl","h":"#parse-the-url-content-to-a-markdown-file","p":595},{"i":600,"t":"Please follow the tutorial Knowledge base from a markdown file to convert your markdown file to a snapshot of embeddings that can be imported into a GaiaNet node.","s":"Create embeddings from the markdown files","u":"/1.0.0/creator-guide/knowledge/firecrawl","h":"#create-embeddings-from-the-markdown-files","p":595},{"i":602,"t":"In this section, we will discuss how to create a vector collection snapshot from a markdown file. The snapshot file can then be loaded by a Gaia node as its knowledge base. The markdown file is segmented into multiple sections by headings. See an example. Each section is turned into a vector, and when retrieved, added to the prompt context for the LLM.","s":"Knowledge base from a markdown file","u":"/1.0.0/creator-guide/knowledge/markdown","h":"","p":601},{"i":604,"t":"Install the WasmEdge Runtime, the cross-platform LLM runtime. curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install_v2.sh | bash -s Download an embedding model. curl -LO https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf The embedding model is a special kind of LLM that turns sentences into vectors. The vectors can then be stored in a vector database and searched later. When the sentences are from a body of text that represents a knowledge domain, that vector database becomes our RAG knowledge base.","s":"Prerequisites","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#prerequisites","p":601},{"i":606,"t":"By default, we use Qdrant as the vector database. You can start a Qdrant instance by starting a Gaia node with a knowledge snapshot. Note: you can also start a Qdrant server using Docker. The following command starts it in the background.
mkdir qdrant_storage mkdir qdrant_snapshots nohup docker run -d -p 6333:6333 -p 6334:6334 \\ -v $(pwd)/qdrant_storage:/qdrant/storage:z \\ -v $(pwd)/qdrant_snapshots:/qdrant/snapshots:z \\ qdrant/qdrant","s":"Start a vector database","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#start-a-vector-database","p":601},{"i":608,"t":"Delete the default collection if it exists. curl -X DELETE 'http://localhost:6333/collections/default' Create a new collection called default. Notice that it is 768 dimensions. That is the output vector size of the embedding model nomic-embed-text-v1.5. If you are using a different embedding model, you should use a dimension that fits the model. curl -X PUT 'http://localhost:6333/collections/default' \\ -H 'Content-Type: application/json' \\ --data-raw '{ \"vectors\": { \"size\": 768, \"distance\": \"Cosine\", \"on_disk\": true } }' Download a program to segment the markdown document and create embeddings. curl -LO https://github.com/GaiaNet-AI/embedding-tools/raw/main/markdown_embed/markdown_embed.wasm It chunks the document based on markdown sections. You can check out the Rust source code here and modify it if you need to use a different chunking strategy. Next, you can run the program by passing a collection name, vector dimension, and the source document. You can pass in the desired markdown heading level for chunking using the --heading_level option. The --ctx_size option matches the embedding model's context window size, which in this case is 8192 tokens, allowing it to process long sections of text. Make sure that Qdrant is running on your local machine. The model is preloaded under the name embedding. The wasm app then uses the embedding model to create the 768-dimension vectors from paris.md and saves them into the default collection. curl -LO https://huggingface.co/datasets/gaianet/paris/raw/main/paris.md wasmedge --dir .:. \\ --nn-preload embedding:GGML:AUTO:nomic-embed-text-v1.5.f16.gguf \\ markdown_embed.wasm embedding default 768 paris.md --heading_level 1 --ctx_size 8192","s":"Create the vector collection snapshot","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#create-the-vector-collection-snapshot","p":601},{"i":610,"t":"You can pass the following options to the program. Using -c or --ctx_size to specify the context size of the input. This defaults to 512. Using -l or --heading_level to specify the markdown heading level for each vector. This defaults to 1. Using -m or --maximum_context_length to specify a context length in the CLI argument. This truncates and warns about each text segment that exceeds the context length. Using -s or --start_vector_id to specify the start vector ID in the CLI argument. This will allow us to run this app multiple times on multiple documents on the same vector collection. Example: the same as the above example, but appending the London guide to the end of an existing collection, starting from index 42. wasmedge --dir .:. \\ --nn-preload embedding:GGML:AUTO:nomic-embed-text-v1.5.f16.gguf \\ markdown_embed.wasm embedding default 768 london.md -c 8192 -l 1 -s 42","s":"Options","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#options","p":601},{"i":612,"t":"You can create a snapshot of the collection, which can be shared and loaded into a different Qdrant database. You can find the snapshot file in the qdrant_snapshots directory, or the ~/gaianet/qdrant/snapshots directory in the Gaia node. curl -X POST 'http://localhost:6333/collections/default/snapshots' We also recommend compressing the snapshot file.
tar czvf my.snapshot.tar.gz my.snapshot Finally, upload the my.snapshot.tar.gz file to Huggingface so that the Gaia node can download and use it.","s":"Create a vector snapshot","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#create-a-vector-snapshot","p":601},{"i":614,"t":"Start a new Gaia node Customize the Gaia node Have fun!","s":"Next steps","u":"/1.0.0/creator-guide/knowledge/markdown","h":"#next-steps","p":601},{"i":616,"t":"In this section, we will discuss how to create a vector collection snapshot from a plain text file. The snapshot file can then be loaded by a Gaia node as its knowledge base. The text file is segmented into multiple chunks by blank lines. See an example. Each chunk is turned into a vector, and when retrieved, added to the prompt context for the LLM.","s":"Knowledge base from a plain text file","u":"/1.0.0/creator-guide/knowledge/text","h":"","p":615},{"i":618,"t":"Install the WasmEdge Runtime, the cross-platform LLM runtime. curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install_v2.sh | bash -s Download an embedding model. curl -LO https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf The embedding model is a special kind of LLM that turns sentences into vectors. The vectors can then be stored in a vector database and searched later. When the sentences are from a body of text that represents a knowledge domain, that vector database becomes our RAG knowledge base.","s":"Prerequisites","u":"/1.0.0/creator-guide/knowledge/text","h":"#prerequisites","p":615},{"i":620,"t":"By default, we use Qdrant as the vector database. You can start a Qdrant instance by starting a Gaia node with a knowledge snapshot. Note: you can also start a Qdrant server using Docker. The following command starts it in the background. mkdir qdrant_storage mkdir qdrant_snapshots nohup docker run -d -p 6333:6333 -p 6334:6334 \\ -v $(pwd)/qdrant_storage:/qdrant/storage:z \\ -v $(pwd)/qdrant_snapshots:/qdrant/snapshots:z \\ qdrant/qdrant","s":"Start a vector database","u":"/1.0.0/creator-guide/knowledge/text","h":"#start-a-vector-database","p":615},{"i":622,"t":"Delete the default collection if it exists. curl -X DELETE 'http://localhost:6333/collections/default' Create a new collection called default. Notice that it is 768 dimensions. That is the output vector size of the embedding model nomic-embed-text-v1.5. If you are using a different embedding model, you should use a dimension that fits the model. curl -X PUT 'http://localhost:6333/collections/default' \\ -H 'Content-Type: application/json' \\ --data-raw '{ \"vectors\": { \"size\": 768, \"distance\": \"Cosine\", \"on_disk\": true } }' Download a program to chunk a document and create embeddings. curl -LO https://github.com/GaiaNet-AI/embedding-tools/raw/main/paragraph_embed/paragraph_embed.wasm It chunks the document based on empty lines. So, you MUST prepare your source document this way -- segment the document into sections of around 200 words with empty lines. You can check out the Rust source code here and modify it if you need to use a different chunking strategy. The paragraph_embed.wasm program does NOT break up code listings even if there are empty lines within the listing. Next, you can run the program by passing a collection name, vector dimension, and the source document. Make sure that Qdrant is running on your local machine. The model is preloaded under the name embedding.
The wasm app then uses the embedding model to create the 768-dimension vectors from paris_chunks.txt and saves them into the default collection. curl -LO https://huggingface.co/datasets/gaianet/paris/raw/main/paris_chunks.txt wasmedge --dir .:. \\ --nn-preload embedding:GGML:AUTO:nomic-embed-text-v1.5.f16.gguf \\ paragraph_embed.wasm embedding default 768 paris_chunks.txt -c 8192","s":"Create the vector collection snapshot","u":"/1.0.0/creator-guide/knowledge/text","h":"#create-the-vector-collection-snapshot","p":615},{"i":624,"t":"You can pass the following options to the program. Use -m or --maximum_context_length to specify a maximum context length; any text segment that exceeds it is truncated, with a warning. Use -s or --start_vector_id to specify the starting vector ID; this allows you to run the app multiple times, on multiple documents, against the same vector collection. Use -c or --ctx_size to specify the context size of the input. It defaults to 512. Example: the same command as above, but appending the London guide to the end of an existing collection, starting from vector ID 42. wasmedge --dir .:. \\ --nn-preload embedding:GGML:AUTO:nomic-embed-text-v1.5.f16.gguf \\ paragraph_embed.wasm embedding default 768 london.txt -c 8192 -s 42","s":"Options","u":"/1.0.0/creator-guide/knowledge/text","h":"#options","p":615},{"i":626,"t":"You can create a snapshot of the collection, which can be shared and loaded into a different Qdrant database. You can find the snapshot file in the qdrant_snapshots directory, or the ~/gaianet/qdrant/snapshots directory in the Gaia node. curl -X POST 'http://localhost:6333/collections/default/snapshots' We also recommend compressing the snapshot file. tar czvf my.snapshot.tar.gz my.snapshot Finally, upload the my.snapshot.tar.gz file to Hugging Face so that the Gaia node can download and use it.","s":"Create a vector snapshot","u":"/1.0.0/creator-guide/knowledge/text","h":"#create-a-vector-snapshot","p":615},{"i":628,"t":"Start a new Gaia node Customize the Gaia node Have fun!","s":"Next steps","u":"/1.0.0/creator-guide/knowledge/text","h":"#next-steps","p":615},{"i":630,"t":"In this section, we will discuss how to create a vector collection snapshot from a PDF file. First, we will parse the unstructured PDF file into a structured markdown file. Then, we will follow the steps from Knowledge base from a markdown file to create embeddings for your PDF files.","s":"Knowledge base from a PDF file","u":"/1.0.0/creator-guide/knowledge/pdf","h":"","p":629},{"i":633,"t":"LlamaParse is a tool to parse files for optimal RAG. You will need a LlamaCloud key from https://cloud.llamaindex.ai. First, install the dependencies. We assume that you already have Node.js 20+ installed. git clone https://github.com/alabulei1/llamaparse-integration.git cd llamaparse-integration npm install llamaindex npm install dotenv Then, edit the .env file to set up the PDF file path and LlamaCloud Key. In this case, you don't need to care about the LLM-related settings. After that, run the following command to parse your PDF into a markdown file. npx tsx transMd.ts The output markdown file, named output.md by default, will be located in this folder. You can change the path in the .env file.","s":"Tool #1: LlamaParse","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#tool-1-llamaparse","p":629},{"i":635,"t":"GPTPDF is an open-source tool that uses GPT-4o to parse PDFs into markdown. You will need an OpenAI key here.
First, install the gptpdf software. pip install gptpdf Then, enter the Python environment. python Next, use the following commands to parse your PDF. from gptpdf import parse_pdf api_key = 'Your OpenAI API Key' pdf_path = 'your_file.pdf' content, image_paths = parse_pdf(pdf_path, api_key=api_key) print(content) The output markdown file, called output.md, will be located in your root directory.","s":"Tool #2: GPTPDF","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#tool-2-gptpdf","p":629},{"i":637,"t":"Please follow the tutorial Knowledge base from a markdown file to convert your markdown file to a snapshot of embeddings that can be imported into a GaiaNet node.","s":"Create embeddings from the markdown files","u":"/1.0.0/creator-guide/knowledge/pdf","h":"#create-embeddings-from-the-markdown-files","p":629},{"i":639,"t":"This guide provides all the information you need to quickly set up and run a Gaia Domain. Note: Ensure that you are the owner of a Gaia Domain Name before proceeding. You can verify your Gaia Domain Name in the \"Assets\" section of your profile. Gaia simplifies the process for domain operators to launch and host a Gaia Domain service in just a few clicks.","s":"Quick Start with Launching Gaia Domain","u":"/1.0.0/domain-guide/quick-start","h":"","p":638},{"i":641,"t":"Access the Create Gaia Domain Page Click LAUNCH DOMAIN in the \"Domain\" or \"Assets\" section under your profile. This will take you to the Create Gaia Domain page. Fill in Domain Details Enter the general information for your domain, including: Domain profile Domain Name Description System Prompt Choose a Gaia Domain Name Select a Gaia domain name from your assets. Select a Supplier Currently, Gaia Cloud is the only supplier. Pick a Gaia Domain Tier Choose a tier to enhance your domain's rewards; this step is required. Configure Server and Management Options Confirm the server configuration for running your domain. Set management preferences, such as whether nodes can join automatically and the specific LLM to use. After completing these six steps, your Gaia Domain will be successfully launched and other nodes can join your domain.","s":"Steps to Launch Your Gaia Domain","u":"/1.0.0/domain-guide/quick-start","h":"#steps-to-launch-your-gaia-domain","p":638},{"i":644,"t":"Specialized, finetuned and RAG-enhanced open-source Large Language Models are key elements in emerging AI agent applications. However, those agent apps also present unique challenges to the traditional cloud computing and SaaS infrastructure, including new requirements for application portability, virtualization, security isolation, costs, data privacy, and ownership. GaiaNet is a decentralized computing infrastructure that enables everyone to create, deploy, scale, and monetize their own AI agents that reflect their styles, values, knowledge, and expertise. A GaiaNet node consists of a high-performance and cross-platform application runtime, a finetuned LLM, a knowledge embedding model, a vector database, a prompt manager, an open API server, and a plugin system for calling external tools and functions using LLM outputs. It can be deployed by any knowledge worker as a digital twin and offered as a web API service. A new class of tradeable assets and a marketplace could be created from individualized knowledge bases and components. Similar GaiaNet nodes are organized into GaiaNet domains, which offer trusted and reliable AI agent services to the public. The GaiaNet node and domains are governed by the GaiaNet DAO (Decentralized Autonomous Organization).
Through Purpose Bound Money smart contracts, the GaiaNet network is a decentralized marketplace for AI agent services.","s":"Abstract","u":"/1.0.0/litepaper","h":"#abstract","p":642},{"i":646,"t":"The emergence of ChatGPT and Large Language Models (LLMs) has revolutionized how humans produce and consume knowledge. Within a year, AI-native applications have evolved from chatbots to copilots, to agents. AI agents would increasingly evolve from supportive tools (akin to Copilots) to autonomous entities capable of completing tasks independently. — Dr. Andrew Ng at Sequoia Capital AI Ascent 2024 Summit Agents are software applications that can complete tasks on their own, autonomously, like a human. The agent can understand the task, plan the steps to complete the task, execute all the steps, handle errors and exceptions, and deliver the results. While a powerful LLM could act as the “brain” for the agent, we need to connect to external data sources (eyes and ears), domain-specific knowledge base and prompts (skills), context stores (memory), and external tools (hands). For agent tasks, we often need to customize the LLM itself: to reduce hallucinations in a specific domain; to generate responses in a specific format (e.g., a JSON schema); to answer “politically incorrect” questions (e.g., to analyze CVE exploits for an agent in the security domain); and to answer requests in a specific style (e.g., to mimic a person). Agents are complex software that require a significant amount of engineering and resources. Today, most agents are closed-source and hosted on SaaS-based LLMs. Popular examples include GPTs and Microsoft/GitHub copilots on OpenAI LLMs, and Duet on Google’s Gemini LLMs. However, as we discussed, a key requirement for agents is to customize and adapt their underlying LLM and software stack for domain-specific tasks — an area where centralized SaaS platforms perform very poorly. For example, with ChatGPT, every small task must be handled by a very large model. It is also enormously expensive to fine-tune or modify any ChatGPT models. The one-size-fits-all LLMs are detrimental to the agent use case in capabilities, alignment, and cost structure. Furthermore, the SaaS-hosted LLMs lack privacy controls on how the agent’s private knowledge might be used and shared. Because of these shortcomings, it is difficult for individual knowledge workers to create and monetize agents for their own domains and tasks on SaaS platforms like OpenAI, Google, Anthropic, Microsoft and AWS. In this paper, we propose a decentralized software platform and protocol network for AI agents for everyone. Specifically, our goals are twofold. Goal #1: Empower individuals to incorporate their private knowledge and expertise into personal LLM agent apps. Those apps aim to perform knowledge tasks and use tools just as the individual would, but also reflect the individual’s style and values. Goal #2: Enable individuals to provide and scale their LLM agents as services, and get compensated for their expertise and work. GaiaNet is “YouTube for knowledge and skills.”","s":"Introduction","u":"/1.0.0/litepaper","h":"#introduction","p":642},{"i":648,"t":"As of April 2024, there are over 6000 open-source LLMs published on Hugging Face. Compared with closed-source LLMs, such as GPT-4, open-source LLMs offer advantages in privacy, cost, and systematic bias. Even in general QA performance, open-source LLMs are closing the gap with their closed-source counterparts quickly.
For AI agent use cases, it has been demonstrated that smaller but task-specific LLMs often outperform larger general models. However, it is difficult for individuals and businesses to deploy and orchestrate multiple finetuned LLMs on their own heterogeneous GPU infrastructure. The complex software stack for agents, as well as the complex interaction with external tools, are fragile and error-prone. Furthermore, LLM agents have entirely different scaling characteristics than past application servers. LLMs are extremely computationally intensive. An LLM agent server can typically only serve one user at a time, and it often blocks for seconds at a time. The scaling need is no longer to handle many async requests on a single server, but to load balance among many discrete servers at internet scale. The GaiaNet project provides a cross-platform and highly efficient SDK and runtime for finetuned open-source LLMs with proprietary knowledge bases, customized prompts, structured responses, and external tools for function calling. A GaiaNet node can be started in minutes on any personal, cloud, or edge device. It can then offer services through an incentivized web3 network.","s":"Open-source and decentralization","u":"/1.0.0/litepaper","h":"#open-source-and-decentralization","p":642},{"i":650,"t":"The basic operational unit in the GaiaNet network is a node. A GaiaNet node is a streamlined software stack that allows any technically competent person to run an AI agent of their own. The software stack on the GaiaNet node consists of the following 7 key components. 1 Application runtime. GaiaNet applications run in a lightweight, secure and high-performance sandbox called WasmEdge. As an open-source project managed by the Linux Foundation and CNCF, the WasmEdge runtime works seamlessly with leading cloud-native tools such as Docker, containerd, CRI-O, Podman and Kubernetes. It is also the virtual machine of choice for leading public blockchains to securely and efficiently execute on-chain and off-chain smart contracts. WasmEdge is a high-performance and cross-platform runtime. It can run AI models on almost all CPUs, GPUs, and AI accelerators at native speed, making it an ideal runtime for decentralized AI agents. 2 Finetuned LLM. The GaiaNet node supports almost all open-source LLMs, multimodal models (e.g., Large Vision Models or LVMs), text-to-image models (e.g., Stable Diffusion) and text-to-video models. That includes all finetuned models using personal or proprietary data. The node owner can finetune open-source models using a wide variety of tools. For example, the node owner can finetune an LLM using personal chat histories so that the finetuned LLM can mimic his own speaking style. He can also finetune an LLM to focus it on a specific knowledge domain to reduce hallucinations and improve answer quality for questions in that domain. A finetuned LLM can be guaranteed to output JSON text that matches a pre-determined schema for use with external tools. Besides LLMs, the node owner could finetune Stable Diffusion models with her own photos to generate images that look like her. 3 Embedding model. The GaiaNet node needs to manage a body of public or proprietary knowledge for the AI agent. It is a key feature that enables the agent to specialize and outperform much larger models in a specific domain. The embedding models are specially trained LLMs that turn input sentences into a vector representation, instead of generating completions.
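To make this concrete, the sketch below requests embedding vectors from a node's OpenAI-compatible embeddings endpoint and compares two sentences by cosine similarity (a minimal Python sketch: the localhost:8080 URL and the model name embedding are assumptions based on the setup described in this paper, not fixed parts of the API):

import requests

def embed(text):
    # Assumes an OpenAI-compatible endpoint serving a model named 'embedding'
    r = requests.post('http://localhost:8080/v1/embeddings',
                      json={'model': 'embedding', 'input': text})
    return r.json()['data'][0]['embedding']

def cosine(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(x * x for x in b) ** 0.5
    return dot / (norm_a * norm_b)

v1 = embed('Paris is the capital of France.')
v2 = embed('The capital of France is Paris.')
print(cosine(v1, v2))  # similar sentences score close to 1.0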
Since the embedding models are trained from LLMs, they can “embed” the “meaning” of the sentences into the vectors so that similar sentences are located close together in the high-dimensional space occupied by those vectors. With the embedding model, a GaiaNet node can ingest a body of text, images, PDFs, web links, audio and video files, and generate a collection of embedding vectors based on their contents. The embedding model also turns user questions and conversations into vectors, which allows the GaiaNet node to quickly identify contents in its knowledge base that are relevant to the current conversation. 4 Vector database. The embedding vectors that form GaiaNet node’s knowledge base are stored on the node itself for optimal performance and maximum privacy. The GaiaNet node includes a Qdrant vector database. 5 Custom prompts. Besides finetuning and knowledge augmentation, the easiest way to customize an LLM for new applications is simply to prompt it. Like humans, LLMs are remarkable one-shot learners. You can simply give it an example of how to accomplish a task, and it will learn and do similar tasks on its own. Prompt engineering is a practical field to research and develop such prompts. Furthermore, effective prompts could be highly dependent on the model in use. A prompt that works well for a large model, such as Mixtral 8x22b, is probably not going to work well for a small model like Mistral 7b. The GaiaNet node can support several different prompts that are dynamically chosen and used in applications. For example, the system_prompt is a general introduction to the agent task the node is supposed to perform. It often contains a persona to help the LLM respond with the right tone. For example, the system_prompt for a college teaching assistant could be: “You are a teaching assistant for UC Berkeley’s computer science 101 class. Please explain concepts and answer questions in detail. Do not answer any question that is not related to math or computer science.” The rag_prompt is a prefix prompt to be dynamically inserted in front of knowledge base search results in a RAG chat. It could be something like this: “Please answer the question based on facts and opinions in the context below. Do not make up anything that is not in the context. ---------” The LLM community has developed many useful prompts for different application use cases. The GaiaNet node allows you to easily manage and experiment with them. Through our developer SDK, GaiaNet owners and operators could customize the logic of dynamic prompt generation in their own way. For example, a GaiaNet node could perform a Google search for any user question, and add the search results into the prompt as context. 6 Function calls and tool use. The LLM is not only great at generating human language, but also excels at generating machine instructions. Through finetuning and prompt engineering, we could get some LLMs to consistently generate structured JSON objects or computer code in many language tasks, such as summarizing and extracting key elements from a paragraph of text. The GaiaNet node allows you to specify the output format of the generated text. You can give it a grammar specification file to enforce that responses will always conform to a pre-defined JSON schema. Once the LLM returns a structured JSON response, the agent typically needs to pass the JSON to a tool that performs the task and comes back with an answer. For example, the user question might be: What is the weather like in Singapore?
The LLM generates the following JSON response. {\"tool\":\"get_current_weather\", \"location\":\"Singapore\",\"unit\":\"celsius\"} The GaiaNet node must know which tool is associated with get_current_weather and then invoke it. GaiaNet node owners and operators can configure any number of external tools by mapping a tool name with a web service endpoint. In the above example, the get_current_weather tool might be associated with a web service that takes this JSON data. The GaiaNet node sends the JSON to the web service endpoint via HTTPS POST and receives an answer, such as 42. It then optionally feeds the answer to the LLM to generate a human-language answer: The current weather in Singapore is 42C. Through the GaiaNet node SDK, developers are not limited to using web services. They can write plugins to process LLM responses locally on the node. For example, the LLM might return Python code, which can be executed locally in a sandbox, allowing the GaiaNet node to perform a complex operation. 7 The API server. All GaiaNet nodes must have the same API for questions and answers. That allows front-end applications to work with, and potentially be load-balanced across, any GaiaNet node. We choose to support the OpenAI API specification, which enables GaiaNet nodes to become drop-in replacements for OpenAI API endpoints for a large ecosystem of applications. The API server runs securely and cross-platform on the WasmEdge runtime. It ties together all the other components in the GaiaNet node. It receives user requests, generates an embedding from the request, searches the vector database, adds search results to the prompt context, generates an LLM response, and then optionally uses the response to perform function calling. The API server also provides a web-based chatbot UI for users to chat with the RAG-enhanced finetuned LLM on the node.","s":"GaiaNet node","u":"/1.0.0/litepaper","h":"#gaianet-node","p":642},{"i":652,"t":"While each GaiaNet node is already a powerful AI agent capable of answering complex questions and performing actions, individual nodes are not suitable for providing public services. There are several important reasons. For public consumers and users, it is very hard to judge the trustworthiness of individual GaiaNet nodes. Harmful misinformation could be spread by malicious node operators. For GaiaNet node owners and operators, there is no economic incentive to provide such services to the public, which could be very costly to run. AI agent servers have very different scaling characteristics than traditional internet application servers. When the agent is processing a user request, it typically takes up all the computing resources on the hardware. Instead of using software to scale concurrent users on a single server, the challenge for GaiaNet is to scale across many identical nodes for a large application. Those challenges have given rise to the GaiaNet domain, which forms the basis of the GaiaNet web3 network. A GaiaNet domain is a collection of GaiaNet nodes available under a single Internet domain name. The domain operator decides which GaiaNet nodes can be registered under the domain and makes the node services available to the public. For example, a GaiaNet domain might be a Computer Science teaching assistant for UC Berkeley. The domain could provide services through https://cs101.gaianet.berkeley.edu. The domain operator needs to do the following. Verify and admit individual nodes to be registered under the domain.
Those nodes must all meet requirements, such as the LLM, knowledge base, and prompts, set by the domain operator to ensure service quality. The node registration on a domain could be done via a whitelist or blacklist. It is up to the domain operator. Monitor each node’s performance in real time and remove inactive ones. Promote the “teaching assistant” chatbot apps to the target audience. Set the price for the API services. Load balance between active nodes. Get paid by users. Pay nodes for their services. Each GaiaNet node has a unique node ID in the form of an ETH address. The private key associated with the ETH address is stored on the node. Once a node is successfully registered with a domain, it is entitled to receive payments from both service revenue and network awards from the domain. The domain could send payments directly to the node's ETH address. Or, the domain could provide a mechanism for a node operator to register multiple nodes under a single Metamask address, such as signing a challenge phrase using the node private keys. In that case, the node operator will receive aggregated payments in his Metamask account for all associated nodes. Each GaiaNet domain has an associated smart contract that is used for escrow payments. It is similar to OpenAI’s credit payment model, where users purchase credits first, and then consume them over time. When the user pays into the smart contract, an access token will be automatically issued to him. He uses this token to make API calls to the domain, which is then load-balanced to random nodes in the domain. As the user consumes those services, his funds in the contract deplete, and the access token stops working once he no longer has any balance. The pricing and payment of the API service are determined by the domain operator. It is typically denominated in USD stablecoins. The domain operator pays a share of the revenue to node operators who provided the services. The GaiaNet network is a decentralized marketplace of agent services. The funds locked in GaiaNet domain contracts are for the single purpose of consuming API services. It is called Purpose Bound Money. A key aspect of the GaiaNet protocol is that the domain operators are “trust providers” in the ecosystem of decentralized nodes. The protocol network is designed to incentivize the trust of the operators through tokenomics designs such as mining and staking. GaiaNet nodes, domains, users, and developers form a DAO to grow the network and benefit all contributors.","s":"GaiaNet network","u":"/1.0.0/litepaper","h":"#gaianet-network","p":642},{"i":654,"t":"The GaiaNet token is a utility token designed to facilitate transactions, support governance, and foster trust in the network. It serves three primary purposes. As a DAO governance token, holders can participate in setting the rules of the network. As a staking token, holders vouch for domain operators’ trustworthiness. Stakers get a cut from the domain operator’s service revenue. But they could also be slashed if the domain operator misbehaves, such as by spreading misinformation or providing unreliable services. As a payment token, the GaiaNet token could be deposited into the domain’s escrow contract and be used to pay for services over time. The payment utility of the GaiaNet token is designed to balance the network supply and demand. The value of the GaiaNet token asset is determined at the time when it enters or leaves the escrow smart contract based on real-time exchange rates.
Service consumers could lock in savings from the potential appreciation of the token. For example, if a user deposits $100 worth of GaiaNet tokens into the contract, and when the domain and nodes get paid, the token value has gone up to $110, he would have received $110 worth of agent services. Conversely, if the token price drops, the service providers (domains and nodes) now have an opportunity to “mine” the tokens on the cheap. If the initial $100 of tokens is only worth $90 now, service providers will get more tokens for each unit of electricity and compute they provide. That incentivizes more nodes to join the network and speculate on a later rise in token value. An exercise: OpenAI is projected to reach $5 billion in ARR in 2024. Assume that most enterprise customers pay quarterly; that is $1.25 billion of circulating market cap in addition to OpenAI’s current enterprise value if they were to issue a payment token. The overall AI services market size is projected to reach $2 trillion in a few years. That translates to a $500 billion market cap for a payment utility token alone.","s":"GaiaNet token","u":"/1.0.0/litepaper","h":"#gaianet-token","p":642},{"i":656,"t":"GaiaNet is a developer platform to create your agent services. We provide tools for you to do the following. Tools to generate finetuning datasets and perform finetuning on CPU and GPU machines. Tools to ingest documents and create vector embeddings for the knowledge base. Rust-based SDK to dynamically generate and manage prompts. Rust-based SDK to extend the agent’s capability for invoking tools and software on the node. For developers who do not wish to operate nodes, we are building a marketplace for finetuned models, knowledge bases and datasets, and function-calling plugins. All those components are blockchain-based assets represented by NFTs. A node operator could purchase NFTs for the components it wishes to use, and share service revenue with the component developers. That enables diverse and cashflow-generating assets to be issued from the GaiaNet ecosystem.","s":"Component marketplace for AI assets","u":"/1.0.0/litepaper","h":"#component-marketplace-for-ai-assets","p":642},{"i":658,"t":"GaiaNet provides open-source tools for individuals and teams to create agent services using their proprietary knowledge and skills. Developers could create finetuned LLMs, knowledge collections, and plugins for the agent, and issue assets based on those components. The GaiaNet protocol makes those nodes discoverable and accessible through GaiaNet domains.","s":"Conclusion","u":"/1.0.0/litepaper","h":"#conclusion","p":642},{"i":660,"t":"A key goal of the GaiaNet project is to enable each individual to create and run his or her own agent service node using finetuned LLMs and proprietary knowledge. In all likelihood, you are not going to run a node with the default Llama 3.2 3B LLM and Paris guidebook knowledge base. In this chapter, we will discuss ways to customize your node.","s":"Customize Your GaiaNet Node","u":"/1.0.0/node-guide/customize","h":"","p":659},{"i":662,"t":"All the node configuration options, such as LLM settings, vector collection for the knowledge base, and prompts, are in the gaianet/config.json file. You can edit this file directly to use your models and vector collections. Or, you can select a different config.json when you initialize the node. Just pass in a URL to the config.json file in your gaianet init command. We have several pre-set config.json files to choose from in this repo.
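Before pointing gaianet init at a config URL, you can fetch the file and inspect what the node will be configured with (a small Python sketch; it simply prints every top-level field of the JSON, using the same pre-set config URL as the example below):

import json
import urllib.request

# One of the pre-set configs from the GaiaNet-AI/node-configs repo
url = 'https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/llama-3-8b-instruct/config.json'
cfg = json.load(urllib.request.urlopen(url))

# Show the model, prompt template, snapshot, and other settings the node will use
for key, value in cfg.items():
    print(key, '=', value)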
For example, the following command initializes a GaiaNet node with a Llama 3 8B model. gaianet init --config https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/llama-3-8b-instruct/config.json The URL to the config.json must point to the actual text file (i.e., the raw.githubusercontent.com URL for GitHub links), not the GitHub HTML page for that file.","s":"Pre-set configurations","u":"/1.0.0/node-guide/customize","h":"#pre-set-configurations","p":659},{"i":664,"t":"After you have initialized the node, you can still make changes to its configuration by editing the config.json file directly. But it is easier and safer to use the gaianet CLI to make changes. You MUST run gaianet init again after you make any changes to the node configuration. The following command shows the config.json fields you can make changes to. gaianet config list Now, let's look at some examples.","s":"The config subcommand","u":"/1.0.0/node-guide/customize","h":"#the-config-subcommand","p":659},{"i":666,"t":"There are over 10,000 finetuned open-source LLMs you can choose from on Hugging Face. They each have different sizes (larger models are more capable but more expensive to run), unique capabilities (e.g., uncensored, strong at math or reasoning, supporting a large context length, etc.), domain expertise (e.g., medicine, coding), and/or styles (e.g., to speak like a teacher or a pirate, to respond in code, to follow conversations). To replace the GaiaNet node's default LLM with an alternative finetuned model, you will need to make changes to the model file, prompt template, and model context length parameters. Those parameters vary depending on the model, but they can be found on the GaiaNet Hugging Face organization's model cards. For example, the following command changes the LLM to a Llama 3 8B model. gaianet config \\ --chat-url https://huggingface.co/gaianet/Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q5_K_M.gguf \\ --chat-ctx-size 4096 \\ --prompt-template llama-3-chat The Llama 3 8B model requires at least 16 GB of RAM. If none of the published finetuned models are perfect for your use case, you can also finetune your own LLM by following these guides. Your GaiaNet node can run your own finetuned models. The --chat-url argument could point to a local file under $HOME/gaianet instead of a public URL. That allows you to use a privately trained or finetuned LLM model file.","s":"Select an LLM","u":"/1.0.0/node-guide/customize","h":"#select-an-llm","p":659},{"i":668,"t":"A key feature of GaiaNet is that users can create and deploy a proprietary knowledge base on the node to supplement the LLM. Each knowledge base is a snapshot file for a vector collection. We encourage you to create your own knowledge base. But you can also use ready-made knowledge bases. You will need to do the following: specify the URL to the vector collection (i.e., the snapshot or snapshot.tar.gz file) in the snapshot option; use the same embedding model that generated this vector collection; modify the system_prompt to give the model background knowledge; and modify the rag_prompt to instruct the model to answer the question when context is retrieved from the vector collection. The following example changes the knowledge base in the node from \"Paris guidebook\" to \"London guidebook\".
gaianet config \\ --snapshot https://huggingface.co/datasets/gaianet/london/resolve/main/london_768_nomic-embed-text-v1.5-f16.snapshot.tar.gz \\ --embedding-url https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf \\ --embedding-ctx-size 8192 \\ --system-prompt \"You are a tour guide in London, UK. Please answer the question from a London visitor accurately.\" \\ --rag-prompt \"The following text is the context for the user question.\\n----------------\\n\" The --snapshot could point to a local file under $HOME/gaianet instead of a public URL. That allows you to use a private vector collection snapshot. Depending on the quality and size of the vectors, you might also need to change the qdrant- options to customize the retrieval behavior. qdrant-limit sets the maximum number of relevant context chunks to add to the prompt. If your knowledge base consists of large sections of text (i.e., each book chapter is a vector), you should probably make this 1 or 2 to limit the prompt length to a reasonable size. qdrant-score-threshold is the minimum match "score" the knowledge content must meet in order to be considered "relevant". This depends on the quality of the knowledge text and the embedding model. In general, this score should be over 0.5 to reduce irrelevant context in the prompt. The embedding model encodes and transforms text into vectors so that they can be stored, searched and retrieved. For different context material, you might need a different embedding model to achieve optimal performance. The MTEB leaderboard is a good place to see the performance benchmarks of embedding models. You can find many of them in the gaianet organization on Hugging Face.","s":"Select a knowledge base","u":"/1.0.0/node-guide/customize","h":"#select-a-knowledge-base","p":659},{"i":670,"t":"In config.json, you can also customize the prompts. Prompts are often tailored for the finetuned LLM or the knowledge base to generate optimal responses from the node. The --system-prompt option sets a system prompt. It provides the background and "personality" of the node. Each API request can set its own system prompt. The --rag-prompt is the prompt to be appended after the system prompt (or user query). It introduces the RAG context retrieved from the vector database, which follows it. The --rag-policy option specifies where the rag-prompt and context should go. By default, its value is system-message and it puts the context in the system prompt. But you could also set it to last-user-message, which puts the rag-prompt and context in front of the latest message from the user.","s":"Customize prompts","u":"/1.0.0/node-guide/customize","h":"#customize-prompts","p":659},{"i":672,"t":"Remember to re-initialize and restart the node after you make configuration changes. # If the node is running # gaianet stop gaianet init gaianet start Next, you can Create a knowledge base from your proprietary knowledge or skills. Finetune your own LLM. Have fun!","s":"Next steps","u":"/1.0.0/node-guide/customize","h":"#next-steps","p":659},{"i":674,"t":"After installing the GaiaNet software, you can use the gaianet CLI to manage the node. The following are the CLI options.","s":"GaiaNet CLI options","u":"/1.0.0/node-guide/cli-options","h":"","p":673},{"i":676,"t":"You can use gaianet --help to check all the available CLI options. gaianet --help ## Output Usage: gaianet {config|init|run|stop|OPTIONS} Subcommands: config Update the configuration. init Initialize the GaiaNet node.
run|start Start the GaiaNet node. stop Stop the GaiaNet node. Options: --help Show this help message","s":"help","u":"/1.0.0/node-guide/cli-options","h":"#help","p":673},{"i":678,"t":"You can use gaianet --version to check your GaiaNet version. gaianet --version","s":"version","u":"/1.0.0/node-guide/cli-options","h":"#version","p":673},{"i":680,"t":"The gaianet init command initializes the node according to the $HOME/gaianet/config.json file. You can use some of our pre-set configurations. gaianet init will init the default node. It's a RAG application with Gaianet knowledge. gaianet init --config mua will init a node with the MUA project knowledge. gaianet init --base will init a node in an alternative directory. You can also use gaianet init url_your_config_json to init your customized settings for the node. You can customize your node using the Gaianet node link. If you're familiar with the Gaianet config.json, you can create your own manually. See an example here. gaianet init --config https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/pure-llama-3-8b/config.json","s":"init","u":"/1.0.0/node-guide/cli-options","h":"#init","p":673},{"i":682,"t":"The gaianet start command starts the node. Use gaianet start to start the node according to the $HOME/gaianet/config.json file. Use gaianet start --base $HOME/gaianet-2.alt to start the node according to the $HOME/gaianet-2.alt/config.json file. Use gaianet start --local-only to start the node for local use according to the $HOME/gaianet/config.json file.","s":"start","u":"/1.0.0/node-guide/cli-options","h":"#start","p":673},{"i":684,"t":"The gaianet stop command stops the running node. Use gaianet stop to stop running the node. Use gaianet stop --force to force stop the GaiaNet node. Use gaianet stop --base $HOME/gaianet-2.alt to stop the node according to the $HOME/gaianet-2.alt/config.json file.","s":"stop","u":"/1.0.0/node-guide/cli-options","h":"#stop","p":673},{"i":686,"t":"The gaianet config command can update the key fields defined in the config.json file. gaianet config --help will list all the available arguments. gaianet config --chat-url will change the download link of the chat model. gaianet config --prompt-template