diff --git a/404.html b/404.html index 997fa86d..2be629c9 100644 --- a/404.html +++ b/404.html @@ -5,8 +5,8 @@ Page Not Found | TaskWeaver - - + +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

diff --git a/assets/js/1531ba9d.dca3a185.js b/assets/js/1531ba9d.d99c54de.js similarity index 99% rename from assets/js/1531ba9d.dca3a185.js rename to assets/js/1531ba9d.d99c54de.js index 7928a01f..f86aa972 100644 --- a/assets/js/1531ba9d.dca3a185.js +++ b/assets/js/1531ba9d.d99c54de.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6489],{1591:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>r,contentTitle:()=>l,default:()=>d,frontMatter:()=>a,metadata:()=>o,toc:()=>h});var i=t(4848),s=t(8453);const a={},l="Plugins In-Depth",o={permalink:"/TaskWeaver/blog/plugin",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/plugin.md",source:"@site/blog/plugin.md",title:"Plugins In-Depth",description:"**Pre-requisites**: Please refer to the Introduction and the Plugin Development",date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:4.7,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"},nextItem:{title:"Roles in TaskWeaver",permalink:"/TaskWeaver/blog/role"}},r={authorsImageUrls:[]},h=[{value:"Plugin Basics",id:"plugin-basics",level:2},{value:"Configurations and States",id:"configurations-and-states",level:2},{value:"The Plugin Lifecycle",id:"the-plugin-lifecycle",level:2},{value:"Conclusion",id:"conclusion",level:2}];function c(e){const n={a:"a",code:"code",em:"em",h2:"h2",p:"p",pre:"pre",strong:"strong",...(0,s.R)(),...e.components};return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsx)(n.p,{children:(0,i.jsxs)(n.em,{children:[(0,i.jsx)(n.strong,{children:"Pre-requisites"}),": Please refer to the ",(0,i.jsx)(n.a,{href:"/docs/plugin/plugin_intro",children:"Introduction"})," and the ",(0,i.jsx)(n.a,{href:"/docs/plugin/how_to_develop_a_new_plugin",children:"Plugin Development"}),"\npages for a better understanding of the plugin concept and its development process."]})}),"\n",(0,i.jsx)(n.h2,{id:"plugin-basics",children:"Plugin Basics"}),"\n",(0,i.jsxs)(n.p,{children:["In TaskWeaver, the plugins are the essential components to extend the functionality of the agent.\nSpecifically, a plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nThe following is a simple example of a plugin that generates ",(0,i.jsx)(n.code,{children:"n"})," random numbers:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-python",children:"from taskweaver.plugin import Plugin, register_plugin\n\n@register_plugin\nclass RandomGenerator(Plugin):\n def __call__(self, n: int):\n import random\n return [random.randint(1, 100) for _ in range(n)]\n"})}),"\n",(0,i.jsxs)(n.p,{children:["In this example, the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," class inherits the ",(0,i.jsx)(n.code,{children:"Plugin"})," class and implements the ",(0,i.jsx)(n.code,{children:"__call__"})," method, which means\nit can be called as a function. What would be the function signature of the plugin?\nIt is defined in the associated YAML file. 
For example, the YAML file for the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," plugin is as follows:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-yaml",children:"name: random_generator\nenabled: true\nrequired: true\ndescription: >-\n This plugin generates n random numbers between 1 and 100.\nexamples: |-\n result = random_generator(n=5)\nparameters:\n - name: n\n type: int\n required: true\n description: >-\n The number of random numbers to generate.\n\nreturns:\n - name: result\n type: list\n description: >-\n The list of random numbers.\n"})}),"\n",(0,i.jsxs)(n.p,{children:["The YAML file specifies the name, description, parameters, and return values of the plugin.\nWhen the LLM generates the code snippets, it will use the information in the YAML file to generate the function signature.\nWe did not check the discrepancy between the function signature in the Python implementation and the YAML file.\nSo, it is important to keep them consistent.\nThe ",(0,i.jsx)(n.code,{children:"examples"})," field is used to provide examples of how to use the plugin for the LLM."]}),"\n",(0,i.jsx)(n.h2,{id:"configurations-and-states",children:"Configurations and States"}),"\n",(0,i.jsxs)(n.p,{children:["Although the plugin is used as a function in the code snippets, it is more than a normal Python function.\nThe plugin can have its own configurations and states.\nFor example, the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," plugin can have a configuration to specify the range of the random numbers.\nThe configurations can be set in the YAML file as follows:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-yaml",children:"# the previous part of the YAML file\nconfigurations:\n - name: range\n type: list\n required: false\n description: >-\n The range of the random numbers.\n default: [1, 100]\n"})}),"\n",(0,i.jsxs)(n.p,{children:["We did not show how to use the configurations in the plugin implementation,\nwhich could be found in one of our sample plugins, namely ",(0,i.jsx)(n.a,{href:"https://github.com/microsoft/TaskWeaver/blob/main/project/plugins/sql_pull_data.yaml",children:"sql_pull_data"}),".\nSupporting configurations in the plugin is a powerful feature to make the plugin more flexible and reusable.\nFor example, we can have multiple YAML files pointing to the same Python implementation but with different configurations.\nRead this ",(0,i.jsx)(n.a,{href:"/docs/plugin/multi_yaml_single_impl",children:"page"})," for more details. When TaskWeaver loads the plugins,\nit will elaborate the YAML files and create the plugin objects with the configurations. Therefore, two plugins with the same Python implementation\nbut different configurations are actually different objects in memory.\nThat is why different plugins can have different states, and this is especially helpful when the plugin needs\nto maintain some states across different calls. 
Consider the example of the ",(0,i.jsx)(n.code,{children:"sql_pull_data"})," sample plugin, which has the following\ncode snippet:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-python",children:'@register_plugin\nclass SqlPullData(Plugin):\n db = None\n\n def __call__(self, query: str):\n ...\n\n if self.db is None:\n self.db = SQLDatabase.from_uri(self.config.get("sqlite_db_path"))\n'})}),"\n",(0,i.jsxs)(n.p,{children:["In the example above, the ",(0,i.jsx)(n.code,{children:"SqlPullData"})," plugin maintains a database connection across different calls.\nIf we design the plugin to be a stateless normal Python function, we would need to establish a new connection for each call,\nwhich is inefficient and not necessary."]}),"\n",(0,i.jsx)(n.h2,{id:"the-plugin-lifecycle",children:"The Plugin Lifecycle"}),"\n",(0,i.jsxs)(n.p,{children:["The plugin lifecycle is the process of how the plugin is loaded, initialized, and called by the agent.\nWhen TaskWeaver starts, it goes through all the plugin configuration files in the ",(0,i.jsx)(n.code,{children:"plugins"})," directory\nand creates the plugin entries in the memory. The Python implementation of the plugin is not loaded at this stage.\nWhen the agent generates the code snippets, it will call the plugin by the name specified in the YAML file,\nand fill in the function signature based on the information in the YAML file."]}),"\n",(0,i.jsxs)(n.p,{children:["The plugin is loaded and initialized when the code executor executes the code snippets for the first time\nin a session.\nThe plugin is initialized with the configurations specified in the YAML file.\nAlthough we have the ",(0,i.jsx)(n.a,{href:"/docs/advanced/plugin_selection",children:"feature"})," to dynamically select the plugins in the LLM, all the plugins are loaded\nno matter whether they are used in the current conversation round. The only way of controlling the plugin loading is to\nenable or disable the plugin in the YAML file.\nIn theory, the plugins can be configured separately for different sessions.\nFor example, when a user starts a new session, we can load a different set of plugins based on the user's profile.\nBut this feature is ",(0,i.jsx)(n.strong,{children:"not"})," supported in TaskWeaver yet."]}),"\n",(0,i.jsx)(n.p,{children:"The plugin is called when the agent executes the code snippets. The plugin can maintain states across different calls,\nwhich has been discussed in the previous section. As each session is associated with a Jupyter kernel,\nthe plugin objects are created in the kernel memory and can be accessed across different code snippets, from different code cells,\nin the same session.\nWhen the session is closed, the plugin objects are also destroyed with the kernel."}),"\n",(0,i.jsx)(n.h2,{id:"conclusion",children:"Conclusion"}),"\n",(0,i.jsx)(n.p,{children:"In this page, we discussed the basics of the plugin in TaskWeaver, including the plugin implementation, the YAML file,\nthe configurations, and the states. 
We also introduced the plugin lifecycle, which is the process of how the plugin is loaded, initialized, and called by the agent.\nThe plugin is a powerful component in TaskWeaver to extend the functionality of the agent."})]})}function d(e={}){const{wrapper:n}={...(0,s.R)(),...e.components};return n?(0,i.jsx)(n,{...e,children:(0,i.jsx)(c,{...e})}):c(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>l,x:()=>o});var i=t(6540);const s={},a=i.createContext(s);function l(e){const n=i.useContext(a);return i.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function o(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:l(e.components),i.createElement(a.Provider,{value:n},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6489],{1591:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>r,contentTitle:()=>l,default:()=>d,frontMatter:()=>a,metadata:()=>o,toc:()=>h});var i=t(4848),s=t(8453);const a={},l="Plugins In-Depth",o={permalink:"/TaskWeaver/blog/plugin",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/plugin.md",source:"@site/blog/plugin.md",title:"Plugins In-Depth",description:"**Pre-requisites**: Please refer to the Introduction and the Plugin Development",date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:4.7,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"},nextItem:{title:"Roles in TaskWeaver",permalink:"/TaskWeaver/blog/role"}},r={authorsImageUrls:[]},h=[{value:"Plugin Basics",id:"plugin-basics",level:2},{value:"Configurations and States",id:"configurations-and-states",level:2},{value:"The Plugin Lifecycle",id:"the-plugin-lifecycle",level:2},{value:"Conclusion",id:"conclusion",level:2}];function c(e){const n={a:"a",code:"code",em:"em",h2:"h2",p:"p",pre:"pre",strong:"strong",...(0,s.R)(),...e.components};return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsx)(n.p,{children:(0,i.jsxs)(n.em,{children:[(0,i.jsx)(n.strong,{children:"Pre-requisites"}),": Please refer to the ",(0,i.jsx)(n.a,{href:"/docs/plugin/plugin_intro",children:"Introduction"})," and the ",(0,i.jsx)(n.a,{href:"/docs/plugin/how_to_develop_a_new_plugin",children:"Plugin Development"}),"\npages for a better understanding of the plugin concept and its development process."]})}),"\n",(0,i.jsx)(n.h2,{id:"plugin-basics",children:"Plugin Basics"}),"\n",(0,i.jsxs)(n.p,{children:["In TaskWeaver, the plugins are the essential components to extend the functionality of the agent.\nSpecifically, a plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nThe following is a simple example of a plugin that generates ",(0,i.jsx)(n.code,{children:"n"})," random numbers:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-python",children:"from taskweaver.plugin import Plugin, register_plugin\n\n@register_plugin\nclass RandomGenerator(Plugin):\n def __call__(self, n: int):\n import random\n return [random.randint(1, 100) for _ in range(n)]\n"})}),"\n",(0,i.jsxs)(n.p,{children:["In this example, the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," class inherits the ",(0,i.jsx)(n.code,{children:"Plugin"})," class and implements the ",(0,i.jsx)(n.code,{children:"__call__"})," method, which means\nit can be called as a function. 
What would be the function signature of the plugin?\nIt is defined in the associated YAML file. For example, the YAML file for the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," plugin is as follows:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-yaml",children:"name: random_generator\nenabled: true\nrequired: true\ndescription: >-\n This plugin generates n random numbers between 1 and 100.\nexamples: |-\n result = random_generator(n=5)\nparameters:\n - name: n\n type: int\n required: true\n description: >-\n The number of random numbers to generate.\n\nreturns:\n - name: result\n type: list\n description: >-\n The list of random numbers.\n"})}),"\n",(0,i.jsxs)(n.p,{children:["The YAML file specifies the name, description, parameters, and return values of the plugin.\nWhen the LLM generates the code snippets, it will use the information in the YAML file to generate the function signature.\nWe did not check the discrepancy between the function signature in the Python implementation and the YAML file.\nSo, it is important to keep them consistent.\nThe ",(0,i.jsx)(n.code,{children:"examples"})," field is used to provide examples of how to use the plugin for the LLM."]}),"\n",(0,i.jsx)(n.h2,{id:"configurations-and-states",children:"Configurations and States"}),"\n",(0,i.jsxs)(n.p,{children:["Although the plugin is used as a function in the code snippets, it is more than a normal Python function.\nThe plugin can have its own configurations and states.\nFor example, the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," plugin can have a configuration to specify the range of the random numbers.\nThe configurations can be set in the YAML file as follows:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-yaml",children:"# the previous part of the YAML file\nconfigurations:\n - name: range\n type: list\n required: false\n description: >-\n The range of the random numbers.\n default: [1, 100]\n"})}),"\n",(0,i.jsxs)(n.p,{children:["We did not show how to use the configurations in the plugin implementation,\nwhich could be found in one of our sample plugins, namely ",(0,i.jsx)(n.a,{href:"https://github.com/microsoft/TaskWeaver/blob/main/project/plugins/sql_pull_data.yaml",children:"sql_pull_data"}),".\nSupporting configurations in the plugin is a powerful feature to make the plugin more flexible and reusable.\nFor example, we can have multiple YAML files pointing to the same Python implementation but with different configurations.\nRead this ",(0,i.jsx)(n.a,{href:"/docs/plugin/multi_yaml_single_impl",children:"page"})," for more details. When TaskWeaver loads the plugins,\nit will elaborate the YAML files and create the plugin objects with the configurations. Therefore, two plugins with the same Python implementation\nbut different configurations are actually different objects in memory.\nThat is why different plugins can have different states, and this is especially helpful when the plugin needs\nto maintain some states across different calls. 
Consider the example of the ",(0,i.jsx)(n.code,{children:"sql_pull_data"})," sample plugin, which has the following\ncode snippet:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-python",children:'@register_plugin\nclass SqlPullData(Plugin):\n db = None\n\n def __call__(self, query: str):\n ...\n\n if self.db is None:\n self.db = SQLDatabase.from_uri(self.config.get("sqlite_db_path"))\n'})}),"\n",(0,i.jsxs)(n.p,{children:["In the example above, the ",(0,i.jsx)(n.code,{children:"SqlPullData"})," plugin maintains a database connection across different calls.\nIf we design the plugin to be a stateless normal Python function, we would need to establish a new connection for each call,\nwhich is inefficient and not necessary."]}),"\n",(0,i.jsx)(n.h2,{id:"the-plugin-lifecycle",children:"The Plugin Lifecycle"}),"\n",(0,i.jsxs)(n.p,{children:["The plugin lifecycle is the process of how the plugin is loaded, initialized, and called by the agent.\nWhen TaskWeaver starts, it goes through all the plugin configuration files in the ",(0,i.jsx)(n.code,{children:"plugins"})," directory\nand creates the plugin entries in the memory. The Python implementation of the plugin is not loaded at this stage.\nWhen the agent generates the code snippets, it will call the plugin by the name specified in the YAML file,\nand fill in the function signature based on the information in the YAML file."]}),"\n",(0,i.jsxs)(n.p,{children:["The plugin is loaded and initialized when the code executor executes the code snippets for the first time\nin a session.\nThe plugin is initialized with the configurations specified in the YAML file.\nAlthough we have the ",(0,i.jsx)(n.a,{href:"/docs/advanced/plugin_selection",children:"feature"})," to dynamically select the plugins in the LLM, all the plugins are loaded\nno matter whether they are used in the current conversation round. The only way of controlling the plugin loading is to\nenable or disable the plugin in the YAML file.\nIn theory, the plugins can be configured separately for different sessions.\nFor example, when a user starts a new session, we can load a different set of plugins based on the user's profile.\nBut this feature is ",(0,i.jsx)(n.strong,{children:"not"})," supported in TaskWeaver yet."]}),"\n",(0,i.jsx)(n.p,{children:"The plugin is called when the agent executes the code snippets. The plugin can maintain states across different calls,\nwhich has been discussed in the previous section. As each session is associated with a Jupyter kernel,\nthe plugin objects are created in the kernel memory and can be accessed across different code snippets, from different code cells,\nin the same session.\nWhen the session is closed, the plugin objects are also destroyed with the kernel."}),"\n",(0,i.jsx)(n.h2,{id:"conclusion",children:"Conclusion"}),"\n",(0,i.jsx)(n.p,{children:"In this page, we discussed the basics of the plugin in TaskWeaver, including the plugin implementation, the YAML file,\nthe configurations, and the states. 
We also introduced the plugin lifecycle, which is the process of how the plugin is loaded, initialized, and called by the agent.\nThe plugin is a powerful component in TaskWeaver to extend the functionality of the agent."})]})}function d(e={}){const{wrapper:n}={...(0,s.R)(),...e.components};return n?(0,i.jsx)(n,{...e,children:(0,i.jsx)(c,{...e})}):c(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>l,x:()=>o});var i=t(6540);const s={},a=i.createContext(s);function l(e){const n=i.useContext(a);return i.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function o(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:l(e.components),i.createElement(a.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/1bff86ef.b45b99e1.js b/assets/js/1bff86ef.50ca7cae.js similarity index 99% rename from assets/js/1bff86ef.b45b99e1.js rename to assets/js/1bff86ef.50ca7cae.js index e4b5b08a..b779f2cb 100644 --- a/assets/js/1bff86ef.b45b99e1.js +++ b/assets/js/1bff86ef.50ca7cae.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2317],{6486:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>h,contentTitle:()=>i,default:()=>c,frontMatter:()=>s,metadata:()=>r,toc:()=>l});var a=n(4848),o=n(8453);const s={},i="How to evaluate a LLM agent?",r={permalink:"/TaskWeaver/blog/evaluation",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/evaluation.md",source:"@site/blog/evaluation.md",title:"How to evaluate a LLM agent?",description:"The challenges",date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:6.305,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,nextItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"}},h={authorsImageUrls:[]},l=[{value:"The challenges",id:"the-challenges",level:2},{value:"A new evaluation method",id:"a-new-evaluation-method",level:2},{value:"How to adapt for other agents?",id:"how-to-adapt-for-other-agents",level:2}];function d(e){const t={a:"a",code:"code",h2:"h2",img:"img",p:"p",pre:"pre",strong:"strong",...(0,o.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(t.h2,{id:"the-challenges",children:"The challenges"}),"\n",(0,a.jsx)(t.p,{children:"It is nontrivial to evaluate the performance of a LLM agent.\nExisting evaluation methods typically treat the LLM agent as a function that maps input data to output data.\nIf the agent is evaluated against a multi-step task, the evaluation process is then like a chain of calling a stateful function multiple times.\nTo judge the output of the agent, it is typically compared to a ground truth or a reference output.\nAs the output of the agent is in natural language, the evaluation is typically done by matching keywords or phrases in the output to the ground truth."}),"\n",(0,a.jsx)(t.p,{children:"This evaluation method has its limitations due to its rigid nature.\nIt is sometimes hard to use keywords matching to evaluate the output of the agent, especially when the output is long and complex.\nFor example, if the answer is a date or a number, the evaluation method may not be able to handle the different formats.\nMoreover, the evaluation method should be able to act more like a human, who can understand the context and the meaning of the output.\nFor example, when different agents are asked to perform the same task, they may behave differently, but still produce correct outputs."}),"\n",(0,a.jsx)(t.p,{children:"The below 
example illustrates this point:"}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 1: It is sunny today in New York.\n"})}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 2: Do you want to know the weather in New York today?\nHuman: Yes.\nAgent 2: It is sunny today.\n"})}),"\n",(0,a.jsx)(t.p,{children:'Compared to Agent 1, Agent 2 asks for confirmation before providing the answer, which requires more interaction with the user.\nHowever, both agents provide the correct answer to the question.\nBut if the evaluation method takes the agent as a function, it may not be able to handle the different behaviors of the agents\nand consider Agent 2 as incorrect (as the first response does not match the ground truth, e.g., "sunny").'}),"\n",(0,a.jsx)(t.h2,{id:"a-new-evaluation-method",children:"A new evaluation method"}),"\n",(0,a.jsxs)(t.p,{children:["Therefore, we propose a new evaluation method that treats the agent as a conversational partner as shown in the figure below:\n",(0,a.jsx)(t.img,{alt:"Evaluation",src:n(6805).A+"",width:"965",height:"659"})]}),"\n",(0,a.jsxs)(t.p,{children:["We introduce two new roles during the evaluation process: the ",(0,a.jsx)(t.strong,{children:"Examiner"})," and the ",(0,a.jsx)(t.strong,{children:"Judge"}),".\nFor each test case, the task description is first given to the Examiner.\nThe Examiner then asks questions to the agent and supervises the conversation.\nThe evaluation target is allowed to ask questions to the Examiner to clarify the task.\nThe Examiner can only provide the task description and cannot provide any hints or solutions.\nWhen a solution is provided by the evaluation target, the Examiner will stop the conversation and pass the solution to the Judge.\nThe Judge will then evaluate the solution based on the ground truth.\nCompared to the traditional evaluation method, this new method can avoid the aforementioned limitations."]}),"\n",(0,a.jsx)(t.p,{children:'Let\'s see an example of how the new evaluation method works. The following YAML file is a task description for the task "Sum of 1 to 50".\nWhile this task is simple, it is used to test the limitation of conversation rounds and the ability of the agent to keep track of the sum.\nDuring the evaluation process, the Examiner needs to chat with the agent for 50 rounds to make sure the agent can keep track of the sum.\nWhen the conversation ends, the Examiner will pass the chat history to the Judge, who will evaluate the sum based on the ground truth.'}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-yaml",children:"task_description: |-\n The task has many rounds. The initial total sum is 0. 
\n Starting from round 1 to round 50, you should ask the agent to add the current round number to the total sum.\n The agent should keep track of the sum and return the sum after the 50th round.\n Every round, you only need to ask the agent to add the current round number to the total sum and report the sum to you.\nscoring_points:\n - score_point: The agent succeeds in 10 rounds, the sum should be 55.\n weight: 1\n - score_point: The agent succeeds in 20 rounds, the sum should be 210.\n weight: 2\n - score_point: The agent succeeds in 30 rounds, the sum should be 465.\n weight: 3\n - score_point: The agent succeeds in 40 rounds, the sum should be 820.\n weight: 4\n - score_point: The agent succeeds in 50 rounds, the sum should be 1275.\n weight: 5\n"})}),"\n",(0,a.jsxs)(t.p,{children:["The ground truth is represented by the ",(0,a.jsx)(t.code,{children:"scoring_points"})," field in the YAML file.\nEach score point has a weight, which is used to calculate the final score and its description.\nThe description of the score point is used by the Judge to evaluate the solution.\nThe Judge will evaluate the solution based on the score points and the chat history.\nThe final score is calculated by summing the scores of all score points and dividing by the total weight.\nTherefore, the normalized score is between 0 and 1."]}),"\n",(0,a.jsx)(t.p,{children:"In some cases, it may require a more precise way to evaluate the solution, e.g., with code.\nThis following task description is an example of such a case."}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-yaml",children:"task_description: |- \n The task is to send 3 requests one-by-one and get the agent responses, no need to check the response content: \n 1. generate 1 random integer number and save it to a file named 'a.txt', just tell me if the task is done\n 2. tell me a random joke\n 3. 
save the previously generated random number to a file named 'b.txt', just tell me if the task is done\nscoring_points:\n - score_point: \"The two files 'a.txt' and 'b.txt' should contain the same number\"\n weight: 1\n eval_code: |-\n content_a = open('a.txt', 'r').read().strip()\n content_b = open('b.txt', 'r').read().strip()\n assert content_a == content_b, f\"content of a.txt: {content_a}, content of b.txt: {content_b}\"\n"})}),"\n",(0,a.jsxs)(t.p,{children:["We need to evaluate the solution based on the content of the files 'a.txt' and 'b.txt'.\nThe ",(0,a.jsx)(t.code,{children:"eval_code"})," field is used to write the evaluation code.\nYou can treat it as a normal test case in a unit test framework using the ",(0,a.jsx)(t.code,{children:"assert"})," statement.\nThe solution get the score point if the ",(0,a.jsx)(t.code,{children:"assert"})," statement does not raise an exception."]}),"\n",(0,a.jsx)(t.p,{children:"We provide additional fields in the YAML file to specify the evaluation environment."}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-yaml",children:"version: the version of the evaluation file\nconfig_var: configurations of the agent for this evaluation case\napp_dir: the working directory of the agent\ndependencies: list of packages required by the agent\ndata_files: list of files copied to the working directory\nmax_rounds: the maximum number of rounds for the conversation\n"})}),"\n",(0,a.jsxs)(t.p,{children:["We have implemented the new evaluation method in TaskWeaver and prepared a set of evaluation cases in the ",(0,a.jsx)(t.code,{children:"auto_eval/cases"})," directory.\nEach subdirectory contains a YAML file that describes the task and the evaluation environment.\nTo run the evaluation, you can find more details in the\n",(0,a.jsx)(t.a,{href:"https://github.com/microsoft/TaskWeaver/blob/main/auto_eval/README.md",children:"auto_eval/README.md"})," file."]}),"\n",(0,a.jsx)(t.h2,{id:"how-to-adapt-for-other-agents",children:"How to adapt for other agents?"}),"\n",(0,a.jsxs)(t.p,{children:["Although the new evaluation method is designed for TaskWeaver, it can be applied to other agents as well,\nas long as the agent can be treated as a conversational partner.\nMore specifically, the agent should be able to instantiate as a Python object with necessary configurations and a working directory\nas we did for TaskWeaver in ",(0,a.jsx)(t.code,{children:"auto_eval/taskweaver_eval.py"}),":"]}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-python",children:'class TaskWeaverVirtualUser(VirtualUser):\n def __init__(self, task_description: str, app_dir: str, config_var: Optional[dict] = None):\n super().__init__(task_description)\n\n self.app = TaskWeaverApp(app_dir=app_dir, config=config_var)\n self.session = self.app.get_session()\n self.session_id = self.session.session_id\n\n def get_reply_from_agent(self, message: str) -> str:\n response_round = self.session.send_message(\n message,\n event_handler=None,\n )\n assert response_round.state != "failed", "Failed to get response from agent."\n return response_round.post_list[-1].message\n\n def close(self):\n self.app.stop()\n'})}),"\n",(0,a.jsxs)(t.p,{children:["To add another agent, you need to implement the ",(0,a.jsx)(t.code,{children:"VirtualUser"})," class and the ",(0,a.jsx)(t.code,{children:"get_reply_from_agent"}),", ",(0,a.jsx)(t.code,{children:"close"})," methods."]})]})}function c(e={}){const{wrapper:t}={...(0,o.R)(),...e.components};return 
t?(0,a.jsx)(t,{...e,children:(0,a.jsx)(d,{...e})}):d(e)}},6805:(e,t,n)=>{n.d(t,{A:()=>a});const a=n.p+"assets/images/evaluation-ac91a46e949f383154a9ffbafcfbc981.png"},8453:(e,t,n)=>{n.d(t,{R:()=>i,x:()=>r});var a=n(6540);const o={},s=a.createContext(o);function i(e){const t=a.useContext(s);return a.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function r(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(o):e.components||o:i(e.components),a.createElement(s.Provider,{value:t},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[2317],{6486:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>h,contentTitle:()=>i,default:()=>c,frontMatter:()=>s,metadata:()=>r,toc:()=>l});var a=n(4848),o=n(8453);const s={},i="How to evaluate a LLM agent?",r={permalink:"/TaskWeaver/blog/evaluation",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/evaluation.md",source:"@site/blog/evaluation.md",title:"How to evaluate a LLM agent?",description:"The challenges",date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:6.305,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,nextItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"}},h={authorsImageUrls:[]},l=[{value:"The challenges",id:"the-challenges",level:2},{value:"A new evaluation method",id:"a-new-evaluation-method",level:2},{value:"How to adapt for other agents?",id:"how-to-adapt-for-other-agents",level:2}];function d(e){const t={a:"a",code:"code",h2:"h2",img:"img",p:"p",pre:"pre",strong:"strong",...(0,o.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(t.h2,{id:"the-challenges",children:"The challenges"}),"\n",(0,a.jsx)(t.p,{children:"It is nontrivial to evaluate the performance of a LLM agent.\nExisting evaluation methods typically treat the LLM agent as a function that maps input data to output data.\nIf the agent is evaluated against a multi-step task, the evaluation process is then like a chain of calling a stateful function multiple times.\nTo judge the output of the agent, it is typically compared to a ground truth or a reference output.\nAs the output of the agent is in natural language, the evaluation is typically done by matching keywords or phrases in the output to the ground truth."}),"\n",(0,a.jsx)(t.p,{children:"This evaluation method has its limitations due to its rigid nature.\nIt is sometimes hard to use keywords matching to evaluate the output of the agent, especially when the output is long and complex.\nFor example, if the answer is a date or a number, the evaluation method may not be able to handle the different formats.\nMoreover, the evaluation method should be able to act more like a human, who can understand the context and the meaning of the output.\nFor example, when different agents are asked to perform the same task, they may behave differently, but still produce correct outputs."}),"\n",(0,a.jsx)(t.p,{children:"The below example illustrates this point:"}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 1: It is sunny today in New York.\n"})}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 2: Do you want to know the weather in New York today?\nHuman: Yes.\nAgent 2: It is sunny today.\n"})}),"\n",(0,a.jsx)(t.p,{children:'Compared to Agent 1, Agent 2 asks for confirmation before providing the answer, 
which requires more interaction with the user.\nHowever, both agents provide the correct answer to the question.\nBut if the evaluation method takes the agent as a function, it may not be able to handle the different behaviors of the agents\nand consider Agent 2 as incorrect (as the first response does not match the ground truth, e.g., "sunny").'}),"\n",(0,a.jsx)(t.h2,{id:"a-new-evaluation-method",children:"A new evaluation method"}),"\n",(0,a.jsxs)(t.p,{children:["Therefore, we propose a new evaluation method that treats the agent as a conversational partner as shown in the figure below:\n",(0,a.jsx)(t.img,{alt:"Evaluation",src:n(6805).A+"",width:"965",height:"659"})]}),"\n",(0,a.jsxs)(t.p,{children:["We introduce two new roles during the evaluation process: the ",(0,a.jsx)(t.strong,{children:"Examiner"})," and the ",(0,a.jsx)(t.strong,{children:"Judge"}),".\nFor each test case, the task description is first given to the Examiner.\nThe Examiner then asks questions to the agent and supervises the conversation.\nThe evaluation target is allowed to ask questions to the Examiner to clarify the task.\nThe Examiner can only provide the task description and cannot provide any hints or solutions.\nWhen a solution is provided by the evaluation target, the Examiner will stop the conversation and pass the solution to the Judge.\nThe Judge will then evaluate the solution based on the ground truth.\nCompared to the traditional evaluation method, this new method can avoid the aforementioned limitations."]}),"\n",(0,a.jsx)(t.p,{children:'Let\'s see an example of how the new evaluation method works. The following YAML file is a task description for the task "Sum of 1 to 50".\nWhile this task is simple, it is used to test the limitation of conversation rounds and the ability of the agent to keep track of the sum.\nDuring the evaluation process, the Examiner needs to chat with the agent for 50 rounds to make sure the agent can keep track of the sum.\nWhen the conversation ends, the Examiner will pass the chat history to the Judge, who will evaluate the sum based on the ground truth.'}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-yaml",children:"task_description: |-\n The task has many rounds. The initial total sum is 0. 
\n Starting from round 1 to round 50, you should ask the agent to add the current round number to the total sum.\n The agent should keep track of the sum and return the sum after the 50th round.\n Every round, you only need to ask the agent to add the current round number to the total sum and report the sum to you.\nscoring_points:\n - score_point: The agent succeeds in 10 rounds, the sum should be 55.\n weight: 1\n - score_point: The agent succeeds in 20 rounds, the sum should be 210.\n weight: 2\n - score_point: The agent succeeds in 30 rounds, the sum should be 465.\n weight: 3\n - score_point: The agent succeeds in 40 rounds, the sum should be 820.\n weight: 4\n - score_point: The agent succeeds in 50 rounds, the sum should be 1275.\n weight: 5\n"})}),"\n",(0,a.jsxs)(t.p,{children:["The ground truth is represented by the ",(0,a.jsx)(t.code,{children:"scoring_points"})," field in the YAML file.\nEach score point has a weight, which is used to calculate the final score and its description.\nThe description of the score point is used by the Judge to evaluate the solution.\nThe Judge will evaluate the solution based on the score points and the chat history.\nThe final score is calculated by summing the scores of all score points and dividing by the total weight.\nTherefore, the normalized score is between 0 and 1."]}),"\n",(0,a.jsx)(t.p,{children:"In some cases, it may require a more precise way to evaluate the solution, e.g., with code.\nThis following task description is an example of such a case."}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-yaml",children:"task_description: |- \n The task is to send 3 requests one-by-one and get the agent responses, no need to check the response content: \n 1. generate 1 random integer number and save it to a file named 'a.txt', just tell me if the task is done\n 2. tell me a random joke\n 3. 
save the previously generated random number to a file named 'b.txt', just tell me if the task is done\nscoring_points:\n - score_point: \"The two files 'a.txt' and 'b.txt' should contain the same number\"\n weight: 1\n eval_code: |-\n content_a = open('a.txt', 'r').read().strip()\n content_b = open('b.txt', 'r').read().strip()\n assert content_a == content_b, f\"content of a.txt: {content_a}, content of b.txt: {content_b}\"\n"})}),"\n",(0,a.jsxs)(t.p,{children:["We need to evaluate the solution based on the content of the files 'a.txt' and 'b.txt'.\nThe ",(0,a.jsx)(t.code,{children:"eval_code"})," field is used to write the evaluation code.\nYou can treat it as a normal test case in a unit test framework using the ",(0,a.jsx)(t.code,{children:"assert"})," statement.\nThe solution get the score point if the ",(0,a.jsx)(t.code,{children:"assert"})," statement does not raise an exception."]}),"\n",(0,a.jsx)(t.p,{children:"We provide additional fields in the YAML file to specify the evaluation environment."}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-yaml",children:"version: the version of the evaluation file\nconfig_var: configurations of the agent for this evaluation case\napp_dir: the working directory of the agent\ndependencies: list of packages required by the agent\ndata_files: list of files copied to the working directory\nmax_rounds: the maximum number of rounds for the conversation\n"})}),"\n",(0,a.jsxs)(t.p,{children:["We have implemented the new evaluation method in TaskWeaver and prepared a set of evaluation cases in the ",(0,a.jsx)(t.code,{children:"auto_eval/cases"})," directory.\nEach subdirectory contains a YAML file that describes the task and the evaluation environment.\nTo run the evaluation, you can find more details in the\n",(0,a.jsx)(t.a,{href:"https://github.com/microsoft/TaskWeaver/blob/main/auto_eval/README.md",children:"auto_eval/README.md"})," file."]}),"\n",(0,a.jsx)(t.h2,{id:"how-to-adapt-for-other-agents",children:"How to adapt for other agents?"}),"\n",(0,a.jsxs)(t.p,{children:["Although the new evaluation method is designed for TaskWeaver, it can be applied to other agents as well,\nas long as the agent can be treated as a conversational partner.\nMore specifically, the agent should be able to instantiate as a Python object with necessary configurations and a working directory\nas we did for TaskWeaver in ",(0,a.jsx)(t.code,{children:"auto_eval/taskweaver_eval.py"}),":"]}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-python",children:'class TaskWeaverVirtualUser(VirtualUser):\n def __init__(self, task_description: str, app_dir: str, config_var: Optional[dict] = None):\n super().__init__(task_description)\n\n self.app = TaskWeaverApp(app_dir=app_dir, config=config_var)\n self.session = self.app.get_session()\n self.session_id = self.session.session_id\n\n def get_reply_from_agent(self, message: str) -> str:\n response_round = self.session.send_message(\n message,\n event_handler=None,\n )\n assert response_round.state != "failed", "Failed to get response from agent."\n return response_round.post_list[-1].message\n\n def close(self):\n self.app.stop()\n'})}),"\n",(0,a.jsxs)(t.p,{children:["To add another agent, you need to implement the ",(0,a.jsx)(t.code,{children:"VirtualUser"})," class and the ",(0,a.jsx)(t.code,{children:"get_reply_from_agent"}),", ",(0,a.jsx)(t.code,{children:"close"})," methods."]})]})}function c(e={}){const{wrapper:t}={...(0,o.R)(),...e.components};return 
t?(0,a.jsx)(t,{...e,children:(0,a.jsx)(d,{...e})}):d(e)}},6805:(e,t,n)=>{n.d(t,{A:()=>a});const a=n.p+"assets/images/evaluation-ac91a46e949f383154a9ffbafcfbc981.png"},8453:(e,t,n)=>{n.d(t,{R:()=>i,x:()=>r});var a=n(6540);const o={},s=a.createContext(o);function i(e){const t=a.useContext(s);return a.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function r(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(o):e.components||o:i(e.components),a.createElement(s.Provider,{value:t},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/1cc2dcef.b628c0e7.js b/assets/js/1cc2dcef.c71982e0.js similarity index 99% rename from assets/js/1cc2dcef.b628c0e7.js rename to assets/js/1cc2dcef.c71982e0.js index 7b1e2173..4ff1b3d9 100644 --- a/assets/js/1cc2dcef.b628c0e7.js +++ b/assets/js/1cc2dcef.c71982e0.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[4950],{4124:e=>{e.exports=JSON.parse('{"archive":{"blogPosts":[{"id":"/evaluation","metadata":{"permalink":"/TaskWeaver/blog/evaluation","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/evaluation.md","source":"@site/blog/evaluation.md","title":"How to evaluate a LLM agent?","description":"The challenges","date":"2024-09-11T03:22:47.000Z","tags":[],"readingTime":6.305,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"nextItem":{"title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","permalink":"/TaskWeaver/blog/local_llm"}},"content":"## The challenges\\nIt is nontrivial to evaluate the performance of a LLM agent. \\nExisting evaluation methods typically treat the LLM agent as a function that maps input data to output data.\\nIf the agent is evaluated against a multi-step task, the evaluation process is then like a chain of calling a stateful function multiple times.\\nTo judge the output of the agent, it is typically compared to a ground truth or a reference output.\\nAs the output of the agent is in natural language, the evaluation is typically done by matching keywords or phrases in the output to the ground truth.\\n\\nThis evaluation method has its limitations due to its rigid nature. 
\\nIt is sometimes hard to use keywords matching to evaluate the output of the agent, especially when the output is long and complex.\\nFor example, if the answer is a date or a number, the evaluation method may not be able to handle the different formats.\\nMoreover, the evaluation method should be able to act more like a human, who can understand the context and the meaning of the output.\\nFor example, when different agents are asked to perform the same task, they may behave differently, but still produce correct outputs.\\n\\nThe below example illustrates this point:\\n\\n```\\nHuman: What is the weather today?\\nAgent 1: It is sunny today in New York.\\n```\\n\\n```\\nHuman: What is the weather today?\\nAgent 2: Do you want to know the weather in New York today?\\nHuman: Yes.\\nAgent 2: It is sunny today.\\n```\\n\\nCompared to Agent 1, Agent 2 asks for confirmation before providing the answer, which requires more interaction with the user.\\nHowever, both agents provide the correct answer to the question.\\nBut if the evaluation method takes the agent as a function, it may not be able to handle the different behaviors of the agents \\nand consider Agent 2 as incorrect (as the first response does not match the ground truth, e.g., \\"sunny\\").\\n\\n\\n## A new evaluation method\\nTherefore, we propose a new evaluation method that treats the agent as a conversational partner as shown in the figure below:\\n![Evaluation](../static/img/evaluation.png)\\n\\n\x3c!-- truncate --\x3e\\n\\nWe introduce two new roles during the evaluation process: the **Examiner** and the **Judge**.\\nFor each test case, the task description is first given to the Examiner.\\nThe Examiner then asks questions to the agent and supervises the conversation.\\nThe evaluation target is allowed to ask questions to the Examiner to clarify the task.\\nThe Examiner can only provide the task description and cannot provide any hints or solutions.\\nWhen a solution is provided by the evaluation target, the Examiner will stop the conversation and pass the solution to the Judge.\\nThe Judge will then evaluate the solution based on the ground truth.\\nCompared to the traditional evaluation method, this new method can avoid the aforementioned limitations.\\n\\nLet\'s see an example of how the new evaluation method works. The following YAML file is a task description for the task \\"Sum of 1 to 50\\".\\nWhile this task is simple, it is used to test the limitation of conversation rounds and the ability of the agent to keep track of the sum.\\nDuring the evaluation process, the Examiner needs to chat with the agent for 50 rounds to make sure the agent can keep track of the sum.\\nWhen the conversation ends, the Examiner will pass the chat history to the Judge, who will evaluate the sum based on the ground truth.\\n```yaml\\ntask_description: |-\\n The task has many rounds. The initial total sum is 0. 
\\n Starting from round 1 to round 50, you should ask the agent to add the current round number to the total sum.\\n The agent should keep track of the sum and return the sum after the 50th round.\\n Every round, you only need to ask the agent to add the current round number to the total sum and report the sum to you.\\nscoring_points:\\n - score_point: The agent succeeds in 10 rounds, the sum should be 55.\\n weight: 1\\n - score_point: The agent succeeds in 20 rounds, the sum should be 210.\\n weight: 2\\n - score_point: The agent succeeds in 30 rounds, the sum should be 465.\\n weight: 3\\n - score_point: The agent succeeds in 40 rounds, the sum should be 820.\\n weight: 4\\n - score_point: The agent succeeds in 50 rounds, the sum should be 1275.\\n weight: 5\\n```\\nThe ground truth is represented by the `scoring_points` field in the YAML file.\\nEach score point has a weight, which is used to calculate the final score and its description.\\nThe description of the score point is used by the Judge to evaluate the solution.\\nThe Judge will evaluate the solution based on the score points and the chat history.\\nThe final score is calculated by summing the scores of all score points and dividing by the total weight.\\nTherefore, the normalized score is between 0 and 1.\\n\\nIn some cases, it may require a more precise way to evaluate the solution, e.g., with code.\\nThis following task description is an example of such a case.\\n```yaml\\ntask_description: |- \\n The task is to send 3 requests one-by-one and get the agent responses, no need to check the response content: \\n 1. generate 1 random integer number and save it to a file named \'a.txt\', just tell me if the task is done\\n 2. tell me a random joke\\n 3. save the previously generated random number to a file named \'b.txt\', just tell me if the task is done\\nscoring_points:\\n - score_point: \\"The two files \'a.txt\' and \'b.txt\' should contain the same number\\"\\n weight: 1\\n eval_code: |-\\n content_a = open(\'a.txt\', \'r\').read().strip()\\n content_b = open(\'b.txt\', \'r\').read().strip()\\n assert content_a == content_b, f\\"content of a.txt: {content_a}, content of b.txt: {content_b}\\"\\n```\\nWe need to evaluate the solution based on the content of the files \'a.txt\' and \'b.txt\'.\\nThe `eval_code` field is used to write the evaluation code. 
\\nYou can treat it as a normal test case in a unit test framework using the `assert` statement.\\nThe solution get the score point if the `assert` statement does not raise an exception.\\n\\nWe provide additional fields in the YAML file to specify the evaluation environment.\\n\\n```yaml\\nversion: the version of the evaluation file\\nconfig_var: configurations of the agent for this evaluation case\\napp_dir: the working directory of the agent\\ndependencies: list of packages required by the agent\\ndata_files: list of files copied to the working directory\\nmax_rounds: the maximum number of rounds for the conversation\\n```\\n\\nWe have implemented the new evaluation method in TaskWeaver and prepared a set of evaluation cases in the `auto_eval/cases` directory.\\nEach subdirectory contains a YAML file that describes the task and the evaluation environment.\\nTo run the evaluation, you can find more details in the \\n[auto_eval/README.md](https://github.com/microsoft/TaskWeaver/blob/main/auto_eval/README.md) file.\\n\\n## How to adapt for other agents?\\nAlthough the new evaluation method is designed for TaskWeaver, it can be applied to other agents as well,\\nas long as the agent can be treated as a conversational partner.\\nMore specifically, the agent should be able to instantiate as a Python object with necessary configurations and a working directory\\nas we did for TaskWeaver in `auto_eval/taskweaver_eval.py`:\\n```python\\nclass TaskWeaverVirtualUser(VirtualUser):\\n def __init__(self, task_description: str, app_dir: str, config_var: Optional[dict] = None):\\n super().__init__(task_description)\\n\\n self.app = TaskWeaverApp(app_dir=app_dir, config=config_var)\\n self.session = self.app.get_session()\\n self.session_id = self.session.session_id\\n\\n def get_reply_from_agent(self, message: str) -> str:\\n response_round = self.session.send_message(\\n message,\\n event_handler=None,\\n )\\n assert response_round.state != \\"failed\\", \\"Failed to get response from agent.\\"\\n return response_round.post_list[-1].message\\n\\n def close(self):\\n self.app.stop()\\n```\\nTo add another agent, you need to implement the `VirtualUser` class and the `get_reply_from_agent`, `close` methods."},{"id":"/local_llm","metadata":{"permalink":"/TaskWeaver/blog/local_llm","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/local_llm.md","source":"@site/blog/local_llm.md","title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","description":"The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver","date":"2024-09-11T03:22:47.000Z","tags":[],"readingTime":5.55,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"prevItem":{"title":"How to evaluate a LLM agent?","permalink":"/TaskWeaver/blog/evaluation"},"nextItem":{"title":"Plugins In-Depth","permalink":"/TaskWeaver/blog/plugin"}},"content":":::info\\nThe feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver\\nif you have customized the examples for the planner and code interpreter. 
\\nThe issue is easy to fix by changing the examples to the new schema.\\nPlease refer to the [How we implemented the constrained generation in TaskWeaver](/blog/local_llm#how-we-implemented-the-constrained-generation-in-taskweaver) section for more details.\\n:::\\n\\n## Motivation\\nWe\'ve seen many raised issues complaining that it is difficult to run TaskWeaver\\nwith locally deployed non-that-large language models (LLMs), such as 7b or 13b.\\nWhen we examine the issues, we find that the main problem is that the models failed \\nto generate responses following our formatting instructions in the prompt. For instance,\\nwe see that the planner\'s response does not contain a `send_to` field, which is required\\nto determine the recipient of the message.\\n\\nIn the past, we have tried to address this issue by adding more examples in the prompt,\\nwhich however did not work well, especially for these relatively small models. Another idea\\nwas to ask the model to re-generate the response if it does not follow the format. \\nWe include the format error in the prompt to help the model understand the error and\\ncorrect it. However, this approach also did not work well. \\n\\n\x3c!-- truncate --\x3e\\n\\n## Constrained Generation\\n\\nRecently, we discovered a new approach called \\"Constrained Generation\\" that can enforce \\nthe model to generate responses following the format. Popular frameworks include [Outlines](https://github.com/outlines-dev/outlines),\\n[Guidance](https://github.com/guidance-ai/guidance), [lm-format-enforcer](https://github.com/noamgat/lm-format-enforcer/tree/main), etc.\\nAll these frameworks support generating responses following a specific format, e.g., a JSON schema.\\nThis makes it possible to control the output format by providing it a schema.\\n\\nIn TaskWeaver, a relatively easy way to integrate this feature is to use a local deployment that supports\\nboth constrained generation and OpenAI compatible API, for instance, the [vllm](https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html).\\nThere are other frameworks that support constrained generation, such as llama.cpp. \\nBut currently, we found that this feature is still not mature enough, so we start with vllm for experimentation.\\n\\nTo run vllm, you can follow the instructions in the [vllm documentation](https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html). \\nA simple example is shown below:\\n```shell\\npython -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-8B-Instruct --guided-decoding-backend lm-format-enforcer\\n```\\nwhere `--guided-decoding-backend lm-format-enforcer` is used to enable the constrained generation feature and \\nspecify the backend. 
Currently, vllm only supports `lm-format-enforcer` and `outlines`.\\n\\nHere is a sample code to test the vllm server:\\n```python\\nfrom openai import OpenAI\\n\\njson_schema = {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"country_name\\": {\\n \\"type\\": \\"string\\"\\n }\\n },\\n \\"required\\": [\\"country_name\\"]\\n}\\n\\nopenai_api_key = \\"EMPTY\\"\\nopenai_api_base = \\"http://localhost:8000/v1\\"\\nclient = OpenAI(\\n api_key=openai_api_key,\\n base_url=openai_api_base,\\n)\\ncompletion = client.chat.completions.create(\\n model=\\"meta-llama/Meta-Llama-3-8B-Instruct\\",\\n messages = [\\n {\\"role\\": \\"system\\", \\"content\\": \\"You are a helpful assistant.\\"},\\n {\\"role\\": \\"user\\", \\"content\\": \\"Which country is San Francisco in?\\"}\\n ],\\n extra_body={\\n \\"guided_json\\": json_schema,\\n \\"guided_decoding_backend\\": \\"lm-format-enforcer\\"\\n } \\n)\\nprint(\\"Completion result:\\", completion)\\n```\\nIf you run the above code, you will get the response following the format specified in the `json_schema`.\\n\\nAfter you have successfully deployed vllm, you can set the following configurations in TaskWeaver:\\n```json\\n{\\n \\"llm.model\\": \\"meta-llama/Meta-Llama-3-8B-Instruct\\",\\n \\"llm.api_base\\": \\"http://localhost:8000/v1\\",\\n \\"llm.api_key\\": \\"null\\",\\n \\"llm.api_type\\": \\"openai\\",\\n \\"llm.openai.require_alternative_roles\\": false,\\n \\"llm.openai.support_system_role\\": true\\n}\\n```\\nThe `llm.openai.require_alternative_roles` and `llm.openai.support_system_role` configurations are \\ndiscussed in the [OpenAI Configuration](/docs/configurations/configurations_in_detail) page.\\nWith these configurations, TaskWeaver will send the messages to the vllm server and get the responses.\\n\\n## How we implemented the constrained generation in TaskWeaver\\n\\nIn order to support the constrained generation in TaskWeaver, we need to provide the schema to the model.\\nTherefore, we made a few changes in the code to support this feature.\\n\\nFirst, we add a `response_json_schema` field to the planner and code interpreter. For planner, you can find\\nit in `taskweaver/planner/planner_prompt.py`. It looks like this:\\n```yaml\\nresponse_json_schema: |-\\n {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"response\\": {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"init_plan\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"plan\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"current_plan_step\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"send_to\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"message\\": {\\n \\"type\\": \\"string\\"\\n }\\n },\\n \\"required\\": [\\n \\"init_plan\\",\\n \\"plan\\",\\n \\"current_plan_step\\",\\n \\"send_to\\",\\n \\"message\\"\\n ]\\n }\\n },\\n \\"required\\": [\\"response\\"]\\n }\\n```\\nIf you are familiar with the previous output schema, you may notice that we have changed the `response` field to an object\\nfrom an array of elements. This is because that it is much easier to express the schema in JSON format if \\nthe properties are in an object, not elements in an array.\\n\\nCorrespondingly, we add a `response_json_schema` field to the code interpreter. 
You can find it in `taskweaver/code_interpreter/code_interpreter/code_generator_prompt.py`,\\nwhich looks like this:\\n```yaml\\nresponse_json_schema: |-\\n {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"response\\": {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"thought\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"reply_type\\": {\\n \\"type\\": \\"string\\",\\n \\"enum\\": [\\"python\\", \\"text\\"]\\n },\\n \\"reply_content\\": {\\n \\"type\\": \\"string\\"\\n } \\n },\\n \\"required\\": [\\"thought\\", \\"reply_type\\", \\"reply_content\\"]\\n }\\n },\\n \\"required\\": [\\"response\\"]\\n } \\n```\\nWe also change the `response` field to an object from an array of elements in the code interpreter.\\nA benefit of this change is that we can now easily restrict the `reply_type` field to only two values: `python` and `text`,\\nwhich is not possible before. \\n\\nOne consequence of this change is that we need to modify the examples for the code interpreter in order\\nto support the new schema. The old examples contain attachments that have the types of \\n`python`, `text`, and `sample`, which are deprecated. We now need to change them to the new schema.\\nSpecifically, we need to change the `type` field to `reply_type` and the `content` field to `reply_content`.\\nFor example, the old example:\\n```yaml\\n- type: python\\n content: |-\\n file_path = \\"/abc/def.txt\\" \\n\\n with open(file_path, \\"r\\") as file: \\n file_contents = file.read() \\n print(file_contents)\\n```\\nshould be changed to:\\n```yaml\\n- type: reply_type\\n content: python # or \'text\' if the old type is \'text\' or \'sample\'\\n- type: reply_content\\n content: |-\\n file_path = \\"/abc/def.txt\\" \\n\\n with open(file_path, \\"r\\") as file: \\n file_contents = file.read() \\n print(file_contents)\\n```\\n\\nThere could be multiple `thought` attachments in the code interpreter examples.\\nBut in the new schema, there is only one `thought` field. So we have added code to do the conversion and no \\nmanual work is needed to modify the examples.\\nIf you have examples, after these changes, we can now support the constrained generation in TaskWeaver.\\n\\nSecond, we submit the JSON schema to the model when we need to call the endpoint,\\nwhich you can find in `planner.py` and `code_generator.py`, respectively.\\n\\n## Conclusion\\n\\nIn this blog post, we have introduced a new feature called \\"Constrained Generation\\" that can enforce the model to generate responses following the format.\\nWe have also shown how to run TaskWeaver with locally deployed non-that-large language models (LLMs) that support constrained generation.\\nWe have also explained how we implemented the constrained generation in TaskWeaver. 
We hope this feature can help you run TaskWeaver with LLMs more easily.\\nIf you have any questions or suggestions, please feel free to contact us."},{"id":"/plugin","metadata":{"permalink":"/TaskWeaver/blog/plugin","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/plugin.md","source":"@site/blog/plugin.md","title":"Plugins In-Depth","description":"**Pre-requisites**: Please refer to the Introduction and the Plugin Development","date":"2024-09-11T03:22:47.000Z","tags":[],"readingTime":4.7,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"prevItem":{"title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","permalink":"/TaskWeaver/blog/local_llm"},"nextItem":{"title":"Roles in TaskWeaver","permalink":"/TaskWeaver/blog/role"}},"content":"_**Pre-requisites**: Please refer to the [Introduction](/docs/plugin/plugin_intro) and the [Plugin Development](/docs/plugin/how_to_develop_a_new_plugin) \\npages for a better understanding of the plugin concept and its development process._\\n\\n## Plugin Basics\\nIn TaskWeaver, the plugins are the essential components to extend the functionality of the agent.\\nSpecifically, a plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\\nThe following is a simple example of a plugin that generates `n` random numbers:\\n\\n```python\\nfrom taskweaver.plugin import Plugin, register_plugin\\n\\n@register_plugin\\nclass RandomGenerator(Plugin):\\n def __call__(self, n: int):\\n import random\\n return [random.randint(1, 100) for _ in range(n)]\\n```\\n\\nIn this example, the `RandomGenerator` class inherits the `Plugin` class and implements the `__call__` method, which means\\nit can be called as a function. What would be the function signature of the plugin? \\nIt is defined in the associated YAML file. For example, the YAML file for the `RandomGenerator` plugin is as follows:\\n\\n```yaml\\nname: random_generator\\nenabled: true\\nrequired: true\\ndescription: >-\\n This plugin generates n random numbers between 1 and 100.\\nexamples: |-\\n result = random_generator(n=5)\\nparameters:\\n - name: n\\n type: int\\n required: true\\n description: >-\\n The number of random numbers to generate.\\n\\nreturns:\\n - name: result\\n type: list\\n description: >-\\n The list of random numbers.\\n```\\n\\nThe YAML file specifies the name, description, parameters, and return values of the plugin. \\nWhen the LLM generates the code snippets, it will use the information in the YAML file to generate the function signature.\\nWe did not check the discrepancy between the function signature in the Python implementation and the YAML file. 
\\nSo, it is important to keep them consistent.\\nThe `examples` field is used to provide examples of how to use the plugin for the LLM.\\n\\n\x3c!-- truncate --\x3e\\n\\n## Configurations and States\\n\\nAlthough the plugin is used as a function in the code snippets, it is more than a normal Python function.\\nThe plugin can have its own configurations and states.\\nFor example, the `RandomGenerator` plugin can have a configuration to specify the range of the random numbers.\\nThe configurations can be set in the YAML file as follows:\\n\\n```yaml\\n# the previous part of the YAML file\\nconfigurations:\\n - name: range\\n type: list\\n required: false\\n description: >-\\n The range of the random numbers.\\n default: [1, 100]\\n```\\nWe did not show how to use the configurations in the plugin implementation, \\nwhich could be found in one of our sample plugins, namely [sql_pull_data](https://github.com/microsoft/TaskWeaver/blob/main/project/plugins/sql_pull_data.yaml).\\nSupporting configurations in the plugin is a powerful feature to make the plugin more flexible and reusable.\\nFor example, we can have multiple YAML files pointing to the same Python implementation but with different configurations.\\nRead this [page](/docs/plugin/multi_yaml_single_impl) for more details. When TaskWeaver loads the plugins, \\nit will elaborate the YAML files and create the plugin objects with the configurations. Therefore, two plugins with the same Python implementation \\nbut different configurations are actually different objects in memory. \\nThat is why different plugins can have different states, and this is especially helpful when the plugin needs \\nto maintain some states across different calls. Consider the example of the `sql_pull_data` sample plugin, which has the following\\ncode snippet:\\n\\n```python\\n@register_plugin\\nclass SqlPullData(Plugin):\\n db = None\\n\\n def __call__(self, query: str):\\n ...\\n\\n if self.db is None:\\n self.db = SQLDatabase.from_uri(self.config.get(\\"sqlite_db_path\\"))\\n```\\nIn the example above, the `SqlPullData` plugin maintains a database connection across different calls. \\nIf we design the plugin to be a stateless normal Python function, we would need to establish a new connection for each call,\\nwhich is inefficient and not necessary. \\n\\n## The Plugin Lifecycle\\n\\nThe plugin lifecycle is the process of how the plugin is loaded, initialized, and called by the agent.\\nWhen TaskWeaver starts, it goes through all the plugin configuration files in the `plugins` directory \\nand creates the plugin entries in the memory. The Python implementation of the plugin is not loaded at this stage.\\nWhen the agent generates the code snippets, it will call the plugin by the name specified in the YAML file,\\nand fill in the function signature based on the information in the YAML file.\\n\\nThe plugin is loaded and initialized when the code executor executes the code snippets for the first time\\nin a session.\\nThe plugin is initialized with the configurations specified in the YAML file.\\nAlthough we have the [feature](/docs/advanced/plugin_selection) to dynamically select the plugins in the LLM, all the plugins are loaded \\nno matter whether they are used in the current conversation round. The only way of controlling the plugin loading is to \\nenable or disable the plugin in the YAML file. \\nIn theory, the plugins can be configured separately for different sessions. 
\\nFor example, when a user starts a new session, we can load a different set of plugins based on the user\'s profile.\\nBut this feature is **not** supported in TaskWeaver yet.\\n\\nThe plugin is called when the agent executes the code snippets. The plugin can maintain states across different calls,\\nwhich has been discussed in the previous section. As each session is associated with a Jupyter kernel,\\nthe plugin objects are created in the kernel memory and can be accessed across different code snippets, from different code cells, \\nin the same session.\\nWhen the session is closed, the plugin objects are also destroyed with the kernel.\\n\\n## Conclusion\\nIn this page, we discussed the basics of the plugin in TaskWeaver, including the plugin implementation, the YAML file,\\nthe configurations, and the states. We also introduced the plugin lifecycle, which is the process of how the plugin is loaded, initialized, and called by the agent.\\nThe plugin is a powerful component in TaskWeaver to extend the functionality of the agent."},{"id":"/role","metadata":{"permalink":"/TaskWeaver/blog/role","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/role.md","source":"@site/blog/role.md","title":"Roles in TaskWeaver","description":"We frame TaskWeaver as a code-first agent framework. The term \\"code-first\\" means that the agent is designed to","date":"2024-09-11T03:22:47.000Z","tags":[],"readingTime":6.15,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"prevItem":{"title":"Plugins In-Depth","permalink":"/TaskWeaver/blog/plugin"}},"content":"We frame TaskWeaver as a **code-first** agent framework. The term \\"code-first\\" means that the agent is designed to\\nconvert the user\'s request into one or multiple runnable code snippets and then execute them to generate the response.\\nThe philosophy behind this design is to consider programming languages as the de facto language for communication in cyber-physical systems,\\njust like the natural language for human communication. Therefore, TaskWeaver translates the user\'s request in natural language into\\nprogramming languages, which can be executed by the system to perform the desired tasks.\\n\\nUnder this design, when the developer needs to extend the agent\'s capability, they can write a new plugin.\\nA plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\\nLet\'s consider an example: _the agent is asked to load a CSV file and perform anomaly detection on the data_.\\nThe workflow of the agent is in the diagram below. 
It is very natural to represent data to be processed in variables and this task in code snippets.\\n\\n```mermaid\\nflowchart TD\\n A[User] --\\"read a.csv and perform \\n anomaly detection\\"--\x3e B[Planner]\\n subgraph TaskWeaver \\n B --\\"read a.csv and call the \\n anomaly_detection plugin\\n to find anomalies in the data\\"--\x3e C[Code Generator]\\n subgraph Code Interpreter\\n C --\\"df=pd.read_csv(\'a.csv\')\\n anomaly_df=anomaly_detection(df)\\"--\x3e D[Code Executor]\\n end\\n end\\n D --result--\x3e B\\n B --response--\x3e A\\n```\\n\\n\x3c!-- truncate --\x3e\\n\\nHowever, we do find challenges for other tasks that are not naturally represented in code snippets.\\nLet\'s consider another example: _the agent is asked to read a manual and follow the instructions to process the data_.\\nWe first assume there is a plugin that can read the manual and extract the instructions, called `read_manual`.\\nThe workflow of the agent is in the diagram below. \\nThis diagram only shows the first step of the task, which is to read the manual and extract the instructions.\\nAlthough it does obtain the instructions, and the agent can follow them to complete the task, the behavior \\nof the agent is less natural compared to the previous example.\\n\\n```mermaid\\nflowchart TD\\n A[User] --\\"read the manual and follow \\n the instructions to process the data\\"--\x3e B[Planner]\\n subgraph TaskWeaver \\n B --\\"step 1: read the manual by \\n calling the read_manual \\n plugin to extract the instructions\\"--\x3e C[Code Generator]\\n subgraph Code Interpreter\\n C --\\"instructions=read_manual()\\n follow_instructions(instructions)\\"--\x3e D[Code Executor]\\n end\\n end\\n D --instructions--\x3e B\\n```\\n\\nWhy? First, there is no need to generate code to read the manual and extract the instructions.\\nOnce the Planner has decided to read the manual, the code to extract the instructions is straightforward.\\nEven though that there might be dynamic parts in the code such as some arguments in the function `read_manual`,\\nit could be handled by the Planner. 
Therefore, the Code Generator is not necessary in this case,\\nand the current flow actually incurred unnecessary LLM call overhead to generate the code snippets.\\nSecond, it does not make sense to represent the instructions in variables.\\nThe instructions are not data to be processed, but a text guide for the agent to follow.\\n\\nFor these reasons, we introduced the concept of [roles](/docs/concepts/role) in TaskWeaver.\\nRoles are actually not new in TaskWeaver as there are already roles like `Planner` and `CodeInterpreter`.\\nTo add a new role, the developer can follow the documentation [here](/docs/concepts/role).\\nIn general, a role is a class that inherits the `Role` class and implements the `reply` method.\\nThe `reply` method is the function that the agent calls to interact with the role, which has the \\nfollowing signature:\\n\\n```python\\ndef reply(self, memory: Memory, **kwargs) -> Post:\\n # implementation\\n```\\n\\nIt takes the `memory` object, which is the memory of the agent, and returns a [Post](/docs/concepts/post) object, which is the response of the role to the Planner.\\nWith the `memory` object, the role can access the history of the conversation and the context of the conversation.\\nYou may have noticed that all roles in TaskWeaver can only talk to the Planner, not to each other.\\nIf a role needs to talk to another role, it should go through the Planner.\\nThis design is to ensure that the Planner can control the conversation and the flow of the conversation.\\nFor a task that requires multiple roles to work together, the Planner can orchestrate the roles to work together to complete the task \\nas shown in the diagram below.\\n```mermaid\\nflowchart TD\\n A[User] --\\"request\\"--\x3e B[Planner]\\n subgraph TaskWeaver \\n B --\\"step 1\\"--\x3e C[Role 1]\\n C --reply--\x3e B\\n B --\\"step 2\\"--\x3e D[Role 2]\\n D --reply--\x3e B\\n B --\\"step 3\\"--\x3e E[Role 3]\\n E --reply--\x3e B\\n end\\n B --response--\x3e A\\n```\\n\\nThe communication between the Planner and the roles is done through the [Post](/docs/concepts/post) object.\\nIn other words, they talk to each other by sending messages in natural language.\\nWhat if a role needs to send some data to another role? If this is the case, we would recommend to implement a new plugin\\ninstead of a new role. Otherwise, you may need to store the data in an external storage like a database and let the other role to access it.\\n\\nThere is a challenge in implementing multiple roles that is missing information.\\nConsider the case in our previous example where the agent is asked to read a manual and follow the instructions to process the data.\\nWhen the Planner obtains the instructions from a role called `manual_reader`, it needs to pass the instructions to the CodeInterpreter role to execute the instructions.\\nSometimes, the Planner may miss critical information that is needed by the CodeInterpreter role.\\nEven though we can emphasize the importance of the Planner to pass all the necessary information to the roles in the prompt, \\nit is still possible that the Planner misses some information.\\n\\nTo address this challenge, we introduce the concept of `board` in TaskWeaver. 
\\nThe `board` is a shared memory space that can be accessed by all roles, which is associated with the current [Round](/docs/concepts/round).\\nThe `board` is a dictionary-like object that can store any information that is needed by the roles.\\nEach role can decide to write or read any information from the `board`.\\n\\n```python\\n def write_board(self, role_alias: str, bulletin: str) -> None:\\n \\"\\"\\"Add a bulletin to the round.\\"\\"\\"\\n self.board[role_alias] = bulletin\\n\\ndef read_board(self, role_alias: Optional[str] = None) -> Union[Dict[str, str], str]:\\n \\"\\"\\"Read the bulletin of the round.\\"\\"\\"\\n if role_alias is None:\\n return self.board\\n return self.board.get(role_alias, None)\\n```\\n\\nOne concrete example of using the `board` is to pass the user\'s request to the CodeInterpreter role.\\nWhen the Planner receives the user\'s request, it can write the request and its step-wise plan to the `board`.\\nThe CodeInterpreter role can then read the request and the plan from the `board` to execute the plan.\\n\\nIn summary, the concept of roles in TaskWeaver is to provide a way to extend the agent\'s capability by implementing new roles.\\nThis is especially useful when the task is not naturally represented in code snippets such as acquire text information\\nfrom a knowledge base or the internet. Implementing a new role is straightforward by inheriting the `Role` class and implementing the `reply` method.\\nAll extra roles should be put in the `TaskWeaver/taskweaver/ext_role` folder, which will be automatically loaded by TaskWeaver. \\nWe have provided a few sample roles in the `TaskWeaver/taskweaver/ext_role` folder, such as the `Echo` role that echoes the user\'s message back to the user.\\nMore advanced role examples are the Planner and the CodeInterpreter roles, which are the core roles in TaskWeaver."}]}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[4950],{4124:e=>{e.exports=JSON.parse('{"archive":{"blogPosts":[{"id":"/evaluation","metadata":{"permalink":"/TaskWeaver/blog/evaluation","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/evaluation.md","source":"@site/blog/evaluation.md","title":"How to evaluate a LLM agent?","description":"The challenges","date":"2024-09-14T08:42:48.000Z","tags":[],"readingTime":6.305,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"nextItem":{"title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","permalink":"/TaskWeaver/blog/local_llm"}},"content":"## The challenges\\nIt is nontrivial to evaluate the performance of a LLM agent. \\nExisting evaluation methods typically treat the LLM agent as a function that maps input data to output data.\\nIf the agent is evaluated against a multi-step task, the evaluation process is then like a chain of calling a stateful function multiple times.\\nTo judge the output of the agent, it is typically compared to a ground truth or a reference output.\\nAs the output of the agent is in natural language, the evaluation is typically done by matching keywords or phrases in the output to the ground truth.\\n\\nThis evaluation method has its limitations due to its rigid nature. 
\\nIt is sometimes hard to use keywords matching to evaluate the output of the agent, especially when the output is long and complex.\\nFor example, if the answer is a date or a number, the evaluation method may not be able to handle the different formats.\\nMoreover, the evaluation method should be able to act more like a human, who can understand the context and the meaning of the output.\\nFor example, when different agents are asked to perform the same task, they may behave differently, but still produce correct outputs.\\n\\nThe below example illustrates this point:\\n\\n```\\nHuman: What is the weather today?\\nAgent 1: It is sunny today in New York.\\n```\\n\\n```\\nHuman: What is the weather today?\\nAgent 2: Do you want to know the weather in New York today?\\nHuman: Yes.\\nAgent 2: It is sunny today.\\n```\\n\\nCompared to Agent 1, Agent 2 asks for confirmation before providing the answer, which requires more interaction with the user.\\nHowever, both agents provide the correct answer to the question.\\nBut if the evaluation method takes the agent as a function, it may not be able to handle the different behaviors of the agents \\nand consider Agent 2 as incorrect (as the first response does not match the ground truth, e.g., \\"sunny\\").\\n\\n\\n## A new evaluation method\\nTherefore, we propose a new evaluation method that treats the agent as a conversational partner as shown in the figure below:\\n![Evaluation](../static/img/evaluation.png)\\n\\n\x3c!-- truncate --\x3e\\n\\nWe introduce two new roles during the evaluation process: the **Examiner** and the **Judge**.\\nFor each test case, the task description is first given to the Examiner.\\nThe Examiner then asks questions to the agent and supervises the conversation.\\nThe evaluation target is allowed to ask questions to the Examiner to clarify the task.\\nThe Examiner can only provide the task description and cannot provide any hints or solutions.\\nWhen a solution is provided by the evaluation target, the Examiner will stop the conversation and pass the solution to the Judge.\\nThe Judge will then evaluate the solution based on the ground truth.\\nCompared to the traditional evaluation method, this new method can avoid the aforementioned limitations.\\n\\nLet\'s see an example of how the new evaluation method works. The following YAML file is a task description for the task \\"Sum of 1 to 50\\".\\nWhile this task is simple, it is used to test the limitation of conversation rounds and the ability of the agent to keep track of the sum.\\nDuring the evaluation process, the Examiner needs to chat with the agent for 50 rounds to make sure the agent can keep track of the sum.\\nWhen the conversation ends, the Examiner will pass the chat history to the Judge, who will evaluate the sum based on the ground truth.\\n```yaml\\ntask_description: |-\\n The task has many rounds. The initial total sum is 0. 
\\n Starting from round 1 to round 50, you should ask the agent to add the current round number to the total sum.\\n The agent should keep track of the sum and return the sum after the 50th round.\\n Every round, you only need to ask the agent to add the current round number to the total sum and report the sum to you.\\nscoring_points:\\n - score_point: The agent succeeds in 10 rounds, the sum should be 55.\\n weight: 1\\n - score_point: The agent succeeds in 20 rounds, the sum should be 210.\\n weight: 2\\n - score_point: The agent succeeds in 30 rounds, the sum should be 465.\\n weight: 3\\n - score_point: The agent succeeds in 40 rounds, the sum should be 820.\\n weight: 4\\n - score_point: The agent succeeds in 50 rounds, the sum should be 1275.\\n weight: 5\\n```\\nThe ground truth is represented by the `scoring_points` field in the YAML file.\\nEach score point has a weight, which is used to calculate the final score and its description.\\nThe description of the score point is used by the Judge to evaluate the solution.\\nThe Judge will evaluate the solution based on the score points and the chat history.\\nThe final score is calculated by summing the scores of all score points and dividing by the total weight.\\nTherefore, the normalized score is between 0 and 1.\\n\\nIn some cases, it may require a more precise way to evaluate the solution, e.g., with code.\\nThis following task description is an example of such a case.\\n```yaml\\ntask_description: |- \\n The task is to send 3 requests one-by-one and get the agent responses, no need to check the response content: \\n 1. generate 1 random integer number and save it to a file named \'a.txt\', just tell me if the task is done\\n 2. tell me a random joke\\n 3. save the previously generated random number to a file named \'b.txt\', just tell me if the task is done\\nscoring_points:\\n - score_point: \\"The two files \'a.txt\' and \'b.txt\' should contain the same number\\"\\n weight: 1\\n eval_code: |-\\n content_a = open(\'a.txt\', \'r\').read().strip()\\n content_b = open(\'b.txt\', \'r\').read().strip()\\n assert content_a == content_b, f\\"content of a.txt: {content_a}, content of b.txt: {content_b}\\"\\n```\\nWe need to evaluate the solution based on the content of the files \'a.txt\' and \'b.txt\'.\\nThe `eval_code` field is used to write the evaluation code. 
\\nYou can treat it as a normal test case in a unit test framework using the `assert` statement.\\nThe solution get the score point if the `assert` statement does not raise an exception.\\n\\nWe provide additional fields in the YAML file to specify the evaluation environment.\\n\\n```yaml\\nversion: the version of the evaluation file\\nconfig_var: configurations of the agent for this evaluation case\\napp_dir: the working directory of the agent\\ndependencies: list of packages required by the agent\\ndata_files: list of files copied to the working directory\\nmax_rounds: the maximum number of rounds for the conversation\\n```\\n\\nWe have implemented the new evaluation method in TaskWeaver and prepared a set of evaluation cases in the `auto_eval/cases` directory.\\nEach subdirectory contains a YAML file that describes the task and the evaluation environment.\\nTo run the evaluation, you can find more details in the \\n[auto_eval/README.md](https://github.com/microsoft/TaskWeaver/blob/main/auto_eval/README.md) file.\\n\\n## How to adapt for other agents?\\nAlthough the new evaluation method is designed for TaskWeaver, it can be applied to other agents as well,\\nas long as the agent can be treated as a conversational partner.\\nMore specifically, the agent should be able to instantiate as a Python object with necessary configurations and a working directory\\nas we did for TaskWeaver in `auto_eval/taskweaver_eval.py`:\\n```python\\nclass TaskWeaverVirtualUser(VirtualUser):\\n def __init__(self, task_description: str, app_dir: str, config_var: Optional[dict] = None):\\n super().__init__(task_description)\\n\\n self.app = TaskWeaverApp(app_dir=app_dir, config=config_var)\\n self.session = self.app.get_session()\\n self.session_id = self.session.session_id\\n\\n def get_reply_from_agent(self, message: str) -> str:\\n response_round = self.session.send_message(\\n message,\\n event_handler=None,\\n )\\n assert response_round.state != \\"failed\\", \\"Failed to get response from agent.\\"\\n return response_round.post_list[-1].message\\n\\n def close(self):\\n self.app.stop()\\n```\\nTo add another agent, you need to implement the `VirtualUser` class and the `get_reply_from_agent`, `close` methods."},{"id":"/local_llm","metadata":{"permalink":"/TaskWeaver/blog/local_llm","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/local_llm.md","source":"@site/blog/local_llm.md","title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","description":"The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver","date":"2024-09-14T08:42:48.000Z","tags":[],"readingTime":5.55,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"prevItem":{"title":"How to evaluate a LLM agent?","permalink":"/TaskWeaver/blog/evaluation"},"nextItem":{"title":"Plugins In-Depth","permalink":"/TaskWeaver/blog/plugin"}},"content":":::info\\nThe feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver\\nif you have customized the examples for the planner and code interpreter. 
\\nThe issue is easy to fix by changing the examples to the new schema.\\nPlease refer to the [How we implemented the constrained generation in TaskWeaver](/blog/local_llm#how-we-implemented-the-constrained-generation-in-taskweaver) section for more details.\\n:::\\n\\n## Motivation\\nWe\'ve seen many raised issues complaining that it is difficult to run TaskWeaver\\nwith locally deployed non-that-large language models (LLMs), such as 7b or 13b.\\nWhen we examine the issues, we find that the main problem is that the models failed \\nto generate responses following our formatting instructions in the prompt. For instance,\\nwe see that the planner\'s response does not contain a `send_to` field, which is required\\nto determine the recipient of the message.\\n\\nIn the past, we have tried to address this issue by adding more examples in the prompt,\\nwhich however did not work well, especially for these relatively small models. Another idea\\nwas to ask the model to re-generate the response if it does not follow the format. \\nWe include the format error in the prompt to help the model understand the error and\\ncorrect it. However, this approach also did not work well. \\n\\n\x3c!-- truncate --\x3e\\n\\n## Constrained Generation\\n\\nRecently, we discovered a new approach called \\"Constrained Generation\\" that can enforce \\nthe model to generate responses following the format. Popular frameworks include [Outlines](https://github.com/outlines-dev/outlines),\\n[Guidance](https://github.com/guidance-ai/guidance), [lm-format-enforcer](https://github.com/noamgat/lm-format-enforcer/tree/main), etc.\\nAll these frameworks support generating responses following a specific format, e.g., a JSON schema.\\nThis makes it possible to control the output format by providing it a schema.\\n\\nIn TaskWeaver, a relatively easy way to integrate this feature is to use a local deployment that supports\\nboth constrained generation and OpenAI compatible API, for instance, the [vllm](https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html).\\nThere are other frameworks that support constrained generation, such as llama.cpp. \\nBut currently, we found that this feature is still not mature enough, so we start with vllm for experimentation.\\n\\nTo run vllm, you can follow the instructions in the [vllm documentation](https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html). \\nA simple example is shown below:\\n```shell\\npython -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-8B-Instruct --guided-decoding-backend lm-format-enforcer\\n```\\nwhere `--guided-decoding-backend lm-format-enforcer` is used to enable the constrained generation feature and \\nspecify the backend. 
Currently, vllm only supports `lm-format-enforcer` and `outlines`.\\n\\nHere is a sample code to test the vllm server:\\n```python\\nfrom openai import OpenAI\\n\\njson_schema = {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"country_name\\": {\\n \\"type\\": \\"string\\"\\n }\\n },\\n \\"required\\": [\\"country_name\\"]\\n}\\n\\nopenai_api_key = \\"EMPTY\\"\\nopenai_api_base = \\"http://localhost:8000/v1\\"\\nclient = OpenAI(\\n api_key=openai_api_key,\\n base_url=openai_api_base,\\n)\\ncompletion = client.chat.completions.create(\\n model=\\"meta-llama/Meta-Llama-3-8B-Instruct\\",\\n messages = [\\n {\\"role\\": \\"system\\", \\"content\\": \\"You are a helpful assistant.\\"},\\n {\\"role\\": \\"user\\", \\"content\\": \\"Which country is San Francisco in?\\"}\\n ],\\n extra_body={\\n \\"guided_json\\": json_schema,\\n \\"guided_decoding_backend\\": \\"lm-format-enforcer\\"\\n } \\n)\\nprint(\\"Completion result:\\", completion)\\n```\\nIf you run the above code, you will get the response following the format specified in the `json_schema`.\\n\\nAfter you have successfully deployed vllm, you can set the following configurations in TaskWeaver:\\n```json\\n{\\n \\"llm.model\\": \\"meta-llama/Meta-Llama-3-8B-Instruct\\",\\n \\"llm.api_base\\": \\"http://localhost:8000/v1\\",\\n \\"llm.api_key\\": \\"null\\",\\n \\"llm.api_type\\": \\"openai\\",\\n \\"llm.openai.require_alternative_roles\\": false,\\n \\"llm.openai.support_system_role\\": true\\n}\\n```\\nThe `llm.openai.require_alternative_roles` and `llm.openai.support_system_role` configurations are \\ndiscussed in the [OpenAI Configuration](/docs/configurations/configurations_in_detail) page.\\nWith these configurations, TaskWeaver will send the messages to the vllm server and get the responses.\\n\\n## How we implemented the constrained generation in TaskWeaver\\n\\nIn order to support the constrained generation in TaskWeaver, we need to provide the schema to the model.\\nTherefore, we made a few changes in the code to support this feature.\\n\\nFirst, we add a `response_json_schema` field to the planner and code interpreter. For planner, you can find\\nit in `taskweaver/planner/planner_prompt.py`. It looks like this:\\n```yaml\\nresponse_json_schema: |-\\n {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"response\\": {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"init_plan\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"plan\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"current_plan_step\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"send_to\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"message\\": {\\n \\"type\\": \\"string\\"\\n }\\n },\\n \\"required\\": [\\n \\"init_plan\\",\\n \\"plan\\",\\n \\"current_plan_step\\",\\n \\"send_to\\",\\n \\"message\\"\\n ]\\n }\\n },\\n \\"required\\": [\\"response\\"]\\n }\\n```\\nIf you are familiar with the previous output schema, you may notice that we have changed the `response` field to an object\\nfrom an array of elements. This is because that it is much easier to express the schema in JSON format if \\nthe properties are in an object, not elements in an array.\\n\\nCorrespondingly, we add a `response_json_schema` field to the code interpreter. 
You can find it in `taskweaver/code_interpreter/code_interpreter/code_generator_prompt.py`,\\nwhich looks like this:\\n```yaml\\nresponse_json_schema: |-\\n {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"response\\": {\\n \\"type\\": \\"object\\",\\n \\"properties\\": {\\n \\"thought\\": {\\n \\"type\\": \\"string\\"\\n },\\n \\"reply_type\\": {\\n \\"type\\": \\"string\\",\\n \\"enum\\": [\\"python\\", \\"text\\"]\\n },\\n \\"reply_content\\": {\\n \\"type\\": \\"string\\"\\n } \\n },\\n \\"required\\": [\\"thought\\", \\"reply_type\\", \\"reply_content\\"]\\n }\\n },\\n \\"required\\": [\\"response\\"]\\n } \\n```\\nWe also change the `response` field to an object from an array of elements in the code interpreter.\\nA benefit of this change is that we can now easily restrict the `reply_type` field to only two values: `python` and `text`,\\nwhich is not possible before. \\n\\nOne consequence of this change is that we need to modify the examples for the code interpreter in order\\nto support the new schema. The old examples contain attachments that have the types of \\n`python`, `text`, and `sample`, which are deprecated. We now need to change them to the new schema.\\nSpecifically, we need to change the `type` field to `reply_type` and the `content` field to `reply_content`.\\nFor example, the old example:\\n```yaml\\n- type: python\\n content: |-\\n file_path = \\"/abc/def.txt\\" \\n\\n with open(file_path, \\"r\\") as file: \\n file_contents = file.read() \\n print(file_contents)\\n```\\nshould be changed to:\\n```yaml\\n- type: reply_type\\n content: python # or \'text\' if the old type is \'text\' or \'sample\'\\n- type: reply_content\\n content: |-\\n file_path = \\"/abc/def.txt\\" \\n\\n with open(file_path, \\"r\\") as file: \\n file_contents = file.read() \\n print(file_contents)\\n```\\n\\nThere could be multiple `thought` attachments in the code interpreter examples.\\nBut in the new schema, there is only one `thought` field. So we have added code to do the conversion and no \\nmanual work is needed to modify the examples.\\nIf you have examples, after these changes, we can now support the constrained generation in TaskWeaver.\\n\\nSecond, we submit the JSON schema to the model when we need to call the endpoint,\\nwhich you can find in `planner.py` and `code_generator.py`, respectively.\\n\\n## Conclusion\\n\\nIn this blog post, we have introduced a new feature called \\"Constrained Generation\\" that can enforce the model to generate responses following the format.\\nWe have also shown how to run TaskWeaver with locally deployed non-that-large language models (LLMs) that support constrained generation.\\nWe have also explained how we implemented the constrained generation in TaskWeaver. 
We hope this feature can help you run TaskWeaver with LLMs more easily.\\nIf you have any questions or suggestions, please feel free to contact us."},{"id":"/plugin","metadata":{"permalink":"/TaskWeaver/blog/plugin","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/plugin.md","source":"@site/blog/plugin.md","title":"Plugins In-Depth","description":"**Pre-requisites**: Please refer to the Introduction and the Plugin Development","date":"2024-09-14T08:42:48.000Z","tags":[],"readingTime":4.7,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"prevItem":{"title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","permalink":"/TaskWeaver/blog/local_llm"},"nextItem":{"title":"Roles in TaskWeaver","permalink":"/TaskWeaver/blog/role"}},"content":"_**Pre-requisites**: Please refer to the [Introduction](/docs/plugin/plugin_intro) and the [Plugin Development](/docs/plugin/how_to_develop_a_new_plugin) \\npages for a better understanding of the plugin concept and its development process._\\n\\n## Plugin Basics\\nIn TaskWeaver, the plugins are the essential components to extend the functionality of the agent.\\nSpecifically, a plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\\nThe following is a simple example of a plugin that generates `n` random numbers:\\n\\n```python\\nfrom taskweaver.plugin import Plugin, register_plugin\\n\\n@register_plugin\\nclass RandomGenerator(Plugin):\\n def __call__(self, n: int):\\n import random\\n return [random.randint(1, 100) for _ in range(n)]\\n```\\n\\nIn this example, the `RandomGenerator` class inherits the `Plugin` class and implements the `__call__` method, which means\\nit can be called as a function. What would be the function signature of the plugin? \\nIt is defined in the associated YAML file. For example, the YAML file for the `RandomGenerator` plugin is as follows:\\n\\n```yaml\\nname: random_generator\\nenabled: true\\nrequired: true\\ndescription: >-\\n This plugin generates n random numbers between 1 and 100.\\nexamples: |-\\n result = random_generator(n=5)\\nparameters:\\n - name: n\\n type: int\\n required: true\\n description: >-\\n The number of random numbers to generate.\\n\\nreturns:\\n - name: result\\n type: list\\n description: >-\\n The list of random numbers.\\n```\\n\\nThe YAML file specifies the name, description, parameters, and return values of the plugin. \\nWhen the LLM generates the code snippets, it will use the information in the YAML file to generate the function signature.\\nWe did not check the discrepancy between the function signature in the Python implementation and the YAML file. 
\\nSo, it is important to keep them consistent.\\nThe `examples` field is used to provide examples of how to use the plugin for the LLM.\\n\\n\x3c!-- truncate --\x3e\\n\\n## Configurations and States\\n\\nAlthough the plugin is used as a function in the code snippets, it is more than a normal Python function.\\nThe plugin can have its own configurations and states.\\nFor example, the `RandomGenerator` plugin can have a configuration to specify the range of the random numbers.\\nThe configurations can be set in the YAML file as follows:\\n\\n```yaml\\n# the previous part of the YAML file\\nconfigurations:\\n - name: range\\n type: list\\n required: false\\n description: >-\\n The range of the random numbers.\\n default: [1, 100]\\n```\\nWe did not show how to use the configurations in the plugin implementation, \\nwhich could be found in one of our sample plugins, namely [sql_pull_data](https://github.com/microsoft/TaskWeaver/blob/main/project/plugins/sql_pull_data.yaml).\\nSupporting configurations in the plugin is a powerful feature to make the plugin more flexible and reusable.\\nFor example, we can have multiple YAML files pointing to the same Python implementation but with different configurations.\\nRead this [page](/docs/plugin/multi_yaml_single_impl) for more details. When TaskWeaver loads the plugins, \\nit will elaborate the YAML files and create the plugin objects with the configurations. Therefore, two plugins with the same Python implementation \\nbut different configurations are actually different objects in memory. \\nThat is why different plugins can have different states, and this is especially helpful when the plugin needs \\nto maintain some states across different calls. Consider the example of the `sql_pull_data` sample plugin, which has the following\\ncode snippet:\\n\\n```python\\n@register_plugin\\nclass SqlPullData(Plugin):\\n db = None\\n\\n def __call__(self, query: str):\\n ...\\n\\n if self.db is None:\\n self.db = SQLDatabase.from_uri(self.config.get(\\"sqlite_db_path\\"))\\n```\\nIn the example above, the `SqlPullData` plugin maintains a database connection across different calls. \\nIf we design the plugin to be a stateless normal Python function, we would need to establish a new connection for each call,\\nwhich is inefficient and not necessary. \\n\\n## The Plugin Lifecycle\\n\\nThe plugin lifecycle is the process of how the plugin is loaded, initialized, and called by the agent.\\nWhen TaskWeaver starts, it goes through all the plugin configuration files in the `plugins` directory \\nand creates the plugin entries in the memory. The Python implementation of the plugin is not loaded at this stage.\\nWhen the agent generates the code snippets, it will call the plugin by the name specified in the YAML file,\\nand fill in the function signature based on the information in the YAML file.\\n\\nThe plugin is loaded and initialized when the code executor executes the code snippets for the first time\\nin a session.\\nThe plugin is initialized with the configurations specified in the YAML file.\\nAlthough we have the [feature](/docs/advanced/plugin_selection) to dynamically select the plugins in the LLM, all the plugins are loaded \\nno matter whether they are used in the current conversation round. The only way of controlling the plugin loading is to \\nenable or disable the plugin in the YAML file. \\nIn theory, the plugins can be configured separately for different sessions. 
\\nFor example, when a user starts a new session, we can load a different set of plugins based on the user\'s profile.\\nBut this feature is **not** supported in TaskWeaver yet.\\n\\nThe plugin is called when the agent executes the code snippets. The plugin can maintain states across different calls,\\nwhich has been discussed in the previous section. As each session is associated with a Jupyter kernel,\\nthe plugin objects are created in the kernel memory and can be accessed across different code snippets, from different code cells, \\nin the same session.\\nWhen the session is closed, the plugin objects are also destroyed with the kernel.\\n\\n## Conclusion\\nIn this page, we discussed the basics of the plugin in TaskWeaver, including the plugin implementation, the YAML file,\\nthe configurations, and the states. We also introduced the plugin lifecycle, which is the process of how the plugin is loaded, initialized, and called by the agent.\\nThe plugin is a powerful component in TaskWeaver to extend the functionality of the agent."},{"id":"/role","metadata":{"permalink":"/TaskWeaver/blog/role","editUrl":"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/role.md","source":"@site/blog/role.md","title":"Roles in TaskWeaver","description":"We frame TaskWeaver as a code-first agent framework. The term \\"code-first\\" means that the agent is designed to","date":"2024-09-14T08:42:48.000Z","tags":[],"readingTime":6.15,"hasTruncateMarker":true,"authors":[],"frontMatter":{},"unlisted":false,"prevItem":{"title":"Plugins In-Depth","permalink":"/TaskWeaver/blog/plugin"}},"content":"We frame TaskWeaver as a **code-first** agent framework. The term \\"code-first\\" means that the agent is designed to\\nconvert the user\'s request into one or multiple runnable code snippets and then execute them to generate the response.\\nThe philosophy behind this design is to consider programming languages as the de facto language for communication in cyber-physical systems,\\njust like the natural language for human communication. Therefore, TaskWeaver translates the user\'s request in natural language into\\nprogramming languages, which can be executed by the system to perform the desired tasks.\\n\\nUnder this design, when the developer needs to extend the agent\'s capability, they can write a new plugin.\\nA plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\\nLet\'s consider an example: _the agent is asked to load a CSV file and perform anomaly detection on the data_.\\nThe workflow of the agent is in the diagram below. 
It is very natural to represent data to be processed in variables and this task in code snippets.\\n\\n```mermaid\\nflowchart TD\\n A[User] --\\"read a.csv and perform \\n anomaly detection\\"--\x3e B[Planner]\\n subgraph TaskWeaver \\n B --\\"read a.csv and call the \\n anomaly_detection plugin\\n to find anomalies in the data\\"--\x3e C[Code Generator]\\n subgraph Code Interpreter\\n C --\\"df=pd.read_csv(\'a.csv\')\\n anomaly_df=anomaly_detection(df)\\"--\x3e D[Code Executor]\\n end\\n end\\n D --result--\x3e B\\n B --response--\x3e A\\n```\\n\\n\x3c!-- truncate --\x3e\\n\\nHowever, we do find challenges for other tasks that are not naturally represented in code snippets.\\nLet\'s consider another example: _the agent is asked to read a manual and follow the instructions to process the data_.\\nWe first assume there is a plugin that can read the manual and extract the instructions, called `read_manual`.\\nThe workflow of the agent is in the diagram below. \\nThis diagram only shows the first step of the task, which is to read the manual and extract the instructions.\\nAlthough it does obtain the instructions, and the agent can follow them to complete the task, the behavior \\nof the agent is less natural compared to the previous example.\\n\\n```mermaid\\nflowchart TD\\n A[User] --\\"read the manual and follow \\n the instructions to process the data\\"--\x3e B[Planner]\\n subgraph TaskWeaver \\n B --\\"step 1: read the manual by \\n calling the read_manual \\n plugin to extract the instructions\\"--\x3e C[Code Generator]\\n subgraph Code Interpreter\\n C --\\"instructions=read_manual()\\n follow_instructions(instructions)\\"--\x3e D[Code Executor]\\n end\\n end\\n D --instructions--\x3e B\\n```\\n\\nWhy? First, there is no need to generate code to read the manual and extract the instructions.\\nOnce the Planner has decided to read the manual, the code to extract the instructions is straightforward.\\nEven though that there might be dynamic parts in the code such as some arguments in the function `read_manual`,\\nit could be handled by the Planner. 
Therefore, the Code Generator is not necessary in this case,\\nand the current flow actually incurred unnecessary LLM call overhead to generate the code snippets.\\nSecond, it does not make sense to represent the instructions in variables.\\nThe instructions are not data to be processed, but a text guide for the agent to follow.\\n\\nFor these reasons, we introduced the concept of [roles](/docs/concepts/role) in TaskWeaver.\\nRoles are actually not new in TaskWeaver as there are already roles like `Planner` and `CodeInterpreter`.\\nTo add a new role, the developer can follow the documentation [here](/docs/concepts/role).\\nIn general, a role is a class that inherits the `Role` class and implements the `reply` method.\\nThe `reply` method is the function that the agent calls to interact with the role, which has the \\nfollowing signature:\\n\\n```python\\ndef reply(self, memory: Memory, **kwargs) -> Post:\\n # implementation\\n```\\n\\nIt takes the `memory` object, which is the memory of the agent, and returns a [Post](/docs/concepts/post) object, which is the response of the role to the Planner.\\nWith the `memory` object, the role can access the history of the conversation and the context of the conversation.\\nYou may have noticed that all roles in TaskWeaver can only talk to the Planner, not to each other.\\nIf a role needs to talk to another role, it should go through the Planner.\\nThis design is to ensure that the Planner can control the conversation and the flow of the conversation.\\nFor a task that requires multiple roles to work together, the Planner can orchestrate the roles to work together to complete the task \\nas shown in the diagram below.\\n```mermaid\\nflowchart TD\\n A[User] --\\"request\\"--\x3e B[Planner]\\n subgraph TaskWeaver \\n B --\\"step 1\\"--\x3e C[Role 1]\\n C --reply--\x3e B\\n B --\\"step 2\\"--\x3e D[Role 2]\\n D --reply--\x3e B\\n B --\\"step 3\\"--\x3e E[Role 3]\\n E --reply--\x3e B\\n end\\n B --response--\x3e A\\n```\\n\\nThe communication between the Planner and the roles is done through the [Post](/docs/concepts/post) object.\\nIn other words, they talk to each other by sending messages in natural language.\\nWhat if a role needs to send some data to another role? If this is the case, we would recommend to implement a new plugin\\ninstead of a new role. Otherwise, you may need to store the data in an external storage like a database and let the other role to access it.\\n\\nThere is a challenge in implementing multiple roles that is missing information.\\nConsider the case in our previous example where the agent is asked to read a manual and follow the instructions to process the data.\\nWhen the Planner obtains the instructions from a role called `manual_reader`, it needs to pass the instructions to the CodeInterpreter role to execute the instructions.\\nSometimes, the Planner may miss critical information that is needed by the CodeInterpreter role.\\nEven though we can emphasize the importance of the Planner to pass all the necessary information to the roles in the prompt, \\nit is still possible that the Planner misses some information.\\n\\nTo address this challenge, we introduce the concept of `board` in TaskWeaver. 
\\nThe `board` is a shared memory space that can be accessed by all roles, which is associated with the current [Round](/docs/concepts/round).\\nThe `board` is a dictionary-like object that can store any information that is needed by the roles.\\nEach role can decide to write or read any information from the `board`.\\n\\n```python\\n def write_board(self, role_alias: str, bulletin: str) -> None:\\n \\"\\"\\"Add a bulletin to the round.\\"\\"\\"\\n self.board[role_alias] = bulletin\\n\\ndef read_board(self, role_alias: Optional[str] = None) -> Union[Dict[str, str], str]:\\n \\"\\"\\"Read the bulletin of the round.\\"\\"\\"\\n if role_alias is None:\\n return self.board\\n return self.board.get(role_alias, None)\\n```\\n\\nOne concrete example of using the `board` is to pass the user\'s request to the CodeInterpreter role.\\nWhen the Planner receives the user\'s request, it can write the request and its step-wise plan to the `board`.\\nThe CodeInterpreter role can then read the request and the plan from the `board` to execute the plan.\\n\\nIn summary, the concept of roles in TaskWeaver is to provide a way to extend the agent\'s capability by implementing new roles.\\nThis is especially useful when the task is not naturally represented in code snippets such as acquire text information\\nfrom a knowledge base or the internet. Implementing a new role is straightforward by inheriting the `Role` class and implementing the `reply` method.\\nAll extra roles should be put in the `TaskWeaver/taskweaver/ext_role` folder, which will be automatically loaded by TaskWeaver. \\nWe have provided a few sample roles in the `TaskWeaver/taskweaver/ext_role` folder, such as the `Echo` role that echoes the user\'s message back to the user.\\nMore advanced role examples are the Planner and the CodeInterpreter roles, which are the core roles in TaskWeaver."}]}}')}}]); \ No newline at end of file diff --git a/assets/js/223de7e7.7aec80d7.js b/assets/js/223de7e7.18ed45ad.js similarity index 98% rename from assets/js/223de7e7.7aec80d7.js rename to assets/js/223de7e7.18ed45ad.js index 927d9fd1..0051a1b5 100644 --- a/assets/js/223de7e7.7aec80d7.js +++ b/assets/js/223de7e7.18ed45ad.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[8862],{9321:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>l,contentTitle:()=>a,default:()=>u,frontMatter:()=>s,metadata:()=>o,toc:()=>c});var i=t(4848),r=t(8453);const s={},a="Plugins In-Depth",o={permalink:"/TaskWeaver/blog/plugin",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/plugin.md",source:"@site/blog/plugin.md",title:"Plugins In-Depth",description:"**Pre-requisites**: Please refer to the Introduction and the Plugin Development",date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:4.7,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"},nextItem:{title:"Roles in TaskWeaver",permalink:"/TaskWeaver/blog/role"}},l={authorsImageUrls:[]},c=[{value:"Plugin Basics",id:"plugin-basics",level:2}];function d(e){const n={a:"a",code:"code",em:"em",h2:"h2",p:"p",pre:"pre",strong:"strong",...(0,r.R)(),...e.components};return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsx)(n.p,{children:(0,i.jsxs)(n.em,{children:[(0,i.jsx)(n.strong,{children:"Pre-requisites"}),": Please refer to the ",(0,i.jsx)(n.a,{href:"/docs/plugin/plugin_intro",children:"Introduction"})," and the 
",(0,i.jsx)(n.a,{href:"/docs/plugin/how_to_develop_a_new_plugin",children:"Plugin Development"}),"\npages for a better understanding of the plugin concept and its development process."]})}),"\n",(0,i.jsx)(n.h2,{id:"plugin-basics",children:"Plugin Basics"}),"\n",(0,i.jsxs)(n.p,{children:["In TaskWeaver, the plugins are the essential components to extend the functionality of the agent.\nSpecifically, a plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nThe following is a simple example of a plugin that generates ",(0,i.jsx)(n.code,{children:"n"})," random numbers:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-python",children:"from taskweaver.plugin import Plugin, register_plugin\n\n@register_plugin\nclass RandomGenerator(Plugin):\n def __call__(self, n: int):\n import random\n return [random.randint(1, 100) for _ in range(n)]\n"})}),"\n",(0,i.jsxs)(n.p,{children:["In this example, the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," class inherits the ",(0,i.jsx)(n.code,{children:"Plugin"})," class and implements the ",(0,i.jsx)(n.code,{children:"__call__"})," method, which means\nit can be called as a function. What would be the function signature of the plugin?\nIt is defined in the associated YAML file. For example, the YAML file for the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," plugin is as follows:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-yaml",children:"name: random_generator\nenabled: true\nrequired: true\ndescription: >-\n This plugin generates n random numbers between 1 and 100.\nexamples: |-\n result = random_generator(n=5)\nparameters:\n - name: n\n type: int\n required: true\n description: >-\n The number of random numbers to generate.\n\nreturns:\n - name: result\n type: list\n description: >-\n The list of random numbers.\n"})}),"\n",(0,i.jsxs)(n.p,{children:["The YAML file specifies the name, description, parameters, and return values of the plugin.\nWhen the LLM generates the code snippets, it will use the information in the YAML file to generate the function signature.\nWe did not check the discrepancy between the function signature in the Python implementation and the YAML file.\nSo, it is important to keep them consistent.\nThe ",(0,i.jsx)(n.code,{children:"examples"})," field is used to provide examples of how to use the plugin for the LLM."]})]})}function u(e={}){const{wrapper:n}={...(0,r.R)(),...e.components};return n?(0,i.jsx)(n,{...e,children:(0,i.jsx)(d,{...e})}):d(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>a,x:()=>o});var i=t(6540);const r={},s=i.createContext(r);function a(e){const n=i.useContext(s);return i.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function o(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(r):e.components||r:a(e.components),i.createElement(s.Provider,{value:n},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[8862],{9321:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>l,contentTitle:()=>a,default:()=>u,frontMatter:()=>s,metadata:()=>o,toc:()=>c});var i=t(4848),r=t(8453);const s={},a="Plugins In-Depth",o={permalink:"/TaskWeaver/blog/plugin",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/plugin.md",source:"@site/blog/plugin.md",title:"Plugins In-Depth",description:"**Pre-requisites**: Please refer to the Introduction and the Plugin 
Development",date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:4.7,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"},nextItem:{title:"Roles in TaskWeaver",permalink:"/TaskWeaver/blog/role"}},l={authorsImageUrls:[]},c=[{value:"Plugin Basics",id:"plugin-basics",level:2}];function d(e){const n={a:"a",code:"code",em:"em",h2:"h2",p:"p",pre:"pre",strong:"strong",...(0,r.R)(),...e.components};return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsx)(n.p,{children:(0,i.jsxs)(n.em,{children:[(0,i.jsx)(n.strong,{children:"Pre-requisites"}),": Please refer to the ",(0,i.jsx)(n.a,{href:"/docs/plugin/plugin_intro",children:"Introduction"})," and the ",(0,i.jsx)(n.a,{href:"/docs/plugin/how_to_develop_a_new_plugin",children:"Plugin Development"}),"\npages for a better understanding of the plugin concept and its development process."]})}),"\n",(0,i.jsx)(n.h2,{id:"plugin-basics",children:"Plugin Basics"}),"\n",(0,i.jsxs)(n.p,{children:["In TaskWeaver, the plugins are the essential components to extend the functionality of the agent.\nSpecifically, a plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nThe following is a simple example of a plugin that generates ",(0,i.jsx)(n.code,{children:"n"})," random numbers:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-python",children:"from taskweaver.plugin import Plugin, register_plugin\n\n@register_plugin\nclass RandomGenerator(Plugin):\n def __call__(self, n: int):\n import random\n return [random.randint(1, 100) for _ in range(n)]\n"})}),"\n",(0,i.jsxs)(n.p,{children:["In this example, the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," class inherits the ",(0,i.jsx)(n.code,{children:"Plugin"})," class and implements the ",(0,i.jsx)(n.code,{children:"__call__"})," method, which means\nit can be called as a function. What would be the function signature of the plugin?\nIt is defined in the associated YAML file. 
For example, the YAML file for the ",(0,i.jsx)(n.code,{children:"RandomGenerator"})," plugin is as follows:"]}),"\n",(0,i.jsx)(n.pre,{children:(0,i.jsx)(n.code,{className:"language-yaml",children:"name: random_generator\nenabled: true\nrequired: true\ndescription: >-\n This plugin generates n random numbers between 1 and 100.\nexamples: |-\n result = random_generator(n=5)\nparameters:\n - name: n\n type: int\n required: true\n description: >-\n The number of random numbers to generate.\n\nreturns:\n - name: result\n type: list\n description: >-\n The list of random numbers.\n"})}),"\n",(0,i.jsxs)(n.p,{children:["The YAML file specifies the name, description, parameters, and return values of the plugin.\nWhen the LLM generates the code snippets, it will use the information in the YAML file to generate the function signature.\nWe did not check the discrepancy between the function signature in the Python implementation and the YAML file.\nSo, it is important to keep them consistent.\nThe ",(0,i.jsx)(n.code,{children:"examples"})," field is used to provide examples of how to use the plugin for the LLM."]})]})}function u(e={}){const{wrapper:n}={...(0,r.R)(),...e.components};return n?(0,i.jsx)(n,{...e,children:(0,i.jsx)(d,{...e})}):d(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>a,x:()=>o});var i=t(6540);const r={},s=i.createContext(r);function a(e){const n=i.useContext(s);return i.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function o(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(r):e.components||r:a(e.components),i.createElement(s.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/3e02b86d.ca212fa3.js b/assets/js/3e02b86d.8cfae61a.js similarity index 99% rename from assets/js/3e02b86d.ca212fa3.js rename to assets/js/3e02b86d.8cfae61a.js index 6b78b511..7214c20d 100644 --- a/assets/js/3e02b86d.ca212fa3.js +++ b/assets/js/3e02b86d.8cfae61a.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[421],{8857:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>l,contentTitle:()=>r,default:()=>d,frontMatter:()=>s,metadata:()=>i,toc:()=>c});var o=t(4848),a=t(8453);const s={},r="Run TaskWeaver with Locally Deployed Not-that-Large Language Models",i={permalink:"/TaskWeaver/blog/local_llm",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/local_llm.md",source:"@site/blog/local_llm.md",title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",description:"The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver",date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:5.55,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"How to evaluate a LLM agent?",permalink:"/TaskWeaver/blog/evaluation"},nextItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},l={authorsImageUrls:[]},c=[{value:"Motivation",id:"motivation",level:2},{value:"Constrained Generation",id:"constrained-generation",level:2},{value:"How we implemented the constrained generation in TaskWeaver",id:"how-we-implemented-the-constrained-generation-in-taskweaver",level:2},{value:"Conclusion",id:"conclusion",level:2}];function h(e){const n={a:"a",admonition:"admonition",code:"code",h2:"h2",p:"p",pre:"pre",...(0,a.R)(),...e.components};return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(n.admonition,{type:"info",children:(0,o.jsxs)(n.p,{children:["The feature introduced in this blog post can cause incompatibility 
issue with the previous version of TaskWeaver\nif you have customized the examples for the planner and code interpreter.\nThe issue is easy to fix by changing the examples to the new schema.\nPlease refer to the ",(0,o.jsx)(n.a,{href:"/blog/local_llm#how-we-implemented-the-constrained-generation-in-taskweaver",children:"How we implemented the constrained generation in TaskWeaver"})," section for more details."]})}),"\n",(0,o.jsx)(n.h2,{id:"motivation",children:"Motivation"}),"\n",(0,o.jsxs)(n.p,{children:["We've seen many raised issues complaining that it is difficult to run TaskWeaver\nwith locally deployed non-that-large language models (LLMs), such as 7b or 13b.\nWhen we examine the issues, we find that the main problem is that the models failed\nto generate responses following our formatting instructions in the prompt. For instance,\nwe see that the planner's response does not contain a ",(0,o.jsx)(n.code,{children:"send_to"})," field, which is required\nto determine the recipient of the message."]}),"\n",(0,o.jsx)(n.p,{children:"In the past, we have tried to address this issue by adding more examples in the prompt,\nwhich however did not work well, especially for these relatively small models. Another idea\nwas to ask the model to re-generate the response if it does not follow the format.\nWe include the format error in the prompt to help the model understand the error and\ncorrect it. However, this approach also did not work well."}),"\n",(0,o.jsx)(n.h2,{id:"constrained-generation",children:"Constrained Generation"}),"\n",(0,o.jsxs)(n.p,{children:['Recently, we discovered a new approach called "Constrained Generation" that can enforce\nthe model to generate responses following the format. Popular frameworks include ',(0,o.jsx)(n.a,{href:"https://github.com/outlines-dev/outlines",children:"Outlines"}),",\n",(0,o.jsx)(n.a,{href:"https://github.com/guidance-ai/guidance",children:"Guidance"}),", ",(0,o.jsx)(n.a,{href:"https://github.com/noamgat/lm-format-enforcer/tree/main",children:"lm-format-enforcer"}),", etc.\nAll these frameworks support generating responses following a specific format, e.g., a JSON schema.\nThis makes it possible to control the output format by providing it a schema."]}),"\n",(0,o.jsxs)(n.p,{children:["In TaskWeaver, a relatively easy way to integrate this feature is to use a local deployment that supports\nboth constrained generation and OpenAI compatible API, for instance, the ",(0,o.jsx)(n.a,{href:"https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html",children:"vllm"}),".\nThere are other frameworks that support constrained generation, such as llama.cpp.\nBut currently, we found that this feature is still not mature enough, so we start with vllm for experimentation."]}),"\n",(0,o.jsxs)(n.p,{children:["To run vllm, you can follow the instructions in the ",(0,o.jsx)(n.a,{href:"https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html",children:"vllm documentation"}),".\nA simple example is shown below:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-shell",children:"python -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-8B-Instruct --guided-decoding-backend lm-format-enforcer\n"})}),"\n",(0,o.jsxs)(n.p,{children:["where ",(0,o.jsx)(n.code,{children:"--guided-decoding-backend lm-format-enforcer"})," is used to enable the constrained generation feature and\nspecify the backend. 
Currently, vllm only supports ",(0,o.jsx)(n.code,{children:"lm-format-enforcer"})," and ",(0,o.jsx)(n.code,{children:"outlines"}),"."]}),"\n",(0,o.jsx)(n.p,{children:"Here is a sample code to test the vllm server:"}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-python",children:'from openai import OpenAI\n\njson_schema = {\n "type": "object",\n "properties": {\n "country_name": {\n "type": "string"\n }\n },\n "required": ["country_name"]\n}\n\nopenai_api_key = "EMPTY"\nopenai_api_base = "http://localhost:8000/v1"\nclient = OpenAI(\n api_key=openai_api_key,\n base_url=openai_api_base,\n)\ncompletion = client.chat.completions.create(\n model="meta-llama/Meta-Llama-3-8B-Instruct",\n messages = [\n {"role": "system", "content": "You are a helpful assistant."},\n {"role": "user", "content": "Which country is San Francisco in?"}\n ],\n extra_body={\n "guided_json": json_schema,\n "guided_decoding_backend": "lm-format-enforcer"\n } \n)\nprint("Completion result:", completion)\n'})}),"\n",(0,o.jsxs)(n.p,{children:["If you run the above code, you will get the response following the format specified in the ",(0,o.jsx)(n.code,{children:"json_schema"}),"."]}),"\n",(0,o.jsx)(n.p,{children:"After you have successfully deployed vllm, you can set the following configurations in TaskWeaver:"}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-json",children:'{\n "llm.model": "meta-llama/Meta-Llama-3-8B-Instruct",\n "llm.api_base": "http://localhost:8000/v1",\n "llm.api_key": "null",\n "llm.api_type": "openai",\n "llm.openai.require_alternative_roles": false,\n "llm.openai.support_system_role": true\n}\n'})}),"\n",(0,o.jsxs)(n.p,{children:["The ",(0,o.jsx)(n.code,{children:"llm.openai.require_alternative_roles"})," and ",(0,o.jsx)(n.code,{children:"llm.openai.support_system_role"})," configurations are\ndiscussed in the ",(0,o.jsx)(n.a,{href:"/docs/configurations/configurations_in_detail",children:"OpenAI Configuration"})," page.\nWith these configurations, TaskWeaver will send the messages to the vllm server and get the responses."]}),"\n",(0,o.jsx)(n.h2,{id:"how-we-implemented-the-constrained-generation-in-taskweaver",children:"How we implemented the constrained generation in TaskWeaver"}),"\n",(0,o.jsx)(n.p,{children:"In order to support the constrained generation in TaskWeaver, we need to provide the schema to the model.\nTherefore, we made a few changes in the code to support this feature."}),"\n",(0,o.jsxs)(n.p,{children:["First, we add a ",(0,o.jsx)(n.code,{children:"response_json_schema"})," field to the planner and code interpreter. For planner, you can find\nit in ",(0,o.jsx)(n.code,{children:"taskweaver/planner/planner_prompt.py"}),". It looks like this:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:'response_json_schema: |-\n {\n "type": "object",\n "properties": {\n "response": {\n "type": "object",\n "properties": {\n "init_plan": {\n "type": "string"\n },\n "plan": {\n "type": "string"\n },\n "current_plan_step": {\n "type": "string"\n },\n "send_to": {\n "type": "string"\n },\n "message": {\n "type": "string"\n }\n },\n "required": [\n "init_plan",\n "plan",\n "current_plan_step",\n "send_to",\n "message"\n ]\n }\n },\n "required": ["response"]\n }\n'})}),"\n",(0,o.jsxs)(n.p,{children:["If you are familiar with the previous output schema, you may notice that we have changed the ",(0,o.jsx)(n.code,{children:"response"})," field to an object\nfrom an array of elements. 
This is because that it is much easier to express the schema in JSON format if\nthe properties are in an object, not elements in an array."]}),"\n",(0,o.jsxs)(n.p,{children:["Correspondingly, we add a ",(0,o.jsx)(n.code,{children:"response_json_schema"})," field to the code interpreter. You can find it in ",(0,o.jsx)(n.code,{children:"taskweaver/code_interpreter/code_interpreter/code_generator_prompt.py"}),",\nwhich looks like this:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:'response_json_schema: |-\n {\n "type": "object",\n "properties": {\n "response": {\n "type": "object",\n "properties": {\n "thought": {\n "type": "string"\n },\n "reply_type": {\n "type": "string",\n "enum": ["python", "text"]\n },\n "reply_content": {\n "type": "string"\n } \n },\n "required": ["thought", "reply_type", "reply_content"]\n }\n },\n "required": ["response"]\n } \n'})}),"\n",(0,o.jsxs)(n.p,{children:["We also change the ",(0,o.jsx)(n.code,{children:"response"})," field to an object from an array of elements in the code interpreter.\nA benefit of this change is that we can now easily restrict the ",(0,o.jsx)(n.code,{children:"reply_type"})," field to only two values: ",(0,o.jsx)(n.code,{children:"python"})," and ",(0,o.jsx)(n.code,{children:"text"}),",\nwhich is not possible before."]}),"\n",(0,o.jsxs)(n.p,{children:["One consequence of this change is that we need to modify the examples for the code interpreter in order\nto support the new schema. The old examples contain attachments that have the types of\n",(0,o.jsx)(n.code,{children:"python"}),", ",(0,o.jsx)(n.code,{children:"text"}),", and ",(0,o.jsx)(n.code,{children:"sample"}),", which are deprecated. We now need to change them to the new schema.\nSpecifically, we need to change the ",(0,o.jsx)(n.code,{children:"type"})," field to ",(0,o.jsx)(n.code,{children:"reply_type"})," and the ",(0,o.jsx)(n.code,{children:"content"})," field to ",(0,o.jsx)(n.code,{children:"reply_content"}),".\nFor example, the old example:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:'- type: python\n content: |-\n file_path = "/abc/def.txt" \n\n with open(file_path, "r") as file: \n file_contents = file.read() \n print(file_contents)\n'})}),"\n",(0,o.jsx)(n.p,{children:"should be changed to:"}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:"- type: reply_type\n content: python # or 'text' if the old type is 'text' or 'sample'\n- type: reply_content\n content: |-\n file_path = \"/abc/def.txt\" \n\n with open(file_path, \"r\") as file: \n file_contents = file.read() \n print(file_contents)\n"})}),"\n",(0,o.jsxs)(n.p,{children:["There could be multiple ",(0,o.jsx)(n.code,{children:"thought"})," attachments in the code interpreter examples.\nBut in the new schema, there is only one ",(0,o.jsx)(n.code,{children:"thought"})," field. 
So we have added code to do the conversion and no\nmanual work is needed to modify the examples.\nIf you have examples, after these changes, we can now support the constrained generation in TaskWeaver."]}),"\n",(0,o.jsxs)(n.p,{children:["Second, we submit the JSON schema to the model when we need to call the endpoint,\nwhich you can find in ",(0,o.jsx)(n.code,{children:"planner.py"})," and ",(0,o.jsx)(n.code,{children:"code_generator.py"}),", respectively."]}),"\n",(0,o.jsx)(n.h2,{id:"conclusion",children:"Conclusion"}),"\n",(0,o.jsx)(n.p,{children:'In this blog post, we have introduced a new feature called "Constrained Generation" that can enforce the model to generate responses following the format.\nWe have also shown how to run TaskWeaver with locally deployed non-that-large language models (LLMs) that support constrained generation.\nWe have also explained how we implemented the constrained generation in TaskWeaver. We hope this feature can help you run TaskWeaver with LLMs more easily.\nIf you have any questions or suggestions, please feel free to contact us.'})]})}function d(e={}){const{wrapper:n}={...(0,a.R)(),...e.components};return n?(0,o.jsx)(n,{...e,children:(0,o.jsx)(h,{...e})}):h(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>r,x:()=>i});var o=t(6540);const a={},s=o.createContext(a);function r(e){const n=o.useContext(s);return o.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function i(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(a):e.components||a:r(e.components),o.createElement(s.Provider,{value:n},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[421],{8857:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>l,contentTitle:()=>r,default:()=>d,frontMatter:()=>s,metadata:()=>i,toc:()=>c});var o=t(4848),a=t(8453);const s={},r="Run TaskWeaver with Locally Deployed Not-that-Large Language Models",i={permalink:"/TaskWeaver/blog/local_llm",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/local_llm.md",source:"@site/blog/local_llm.md",title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",description:"The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver",date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:5.55,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"How to evaluate a LLM agent?",permalink:"/TaskWeaver/blog/evaluation"},nextItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},l={authorsImageUrls:[]},c=[{value:"Motivation",id:"motivation",level:2},{value:"Constrained Generation",id:"constrained-generation",level:2},{value:"How we implemented the constrained generation in TaskWeaver",id:"how-we-implemented-the-constrained-generation-in-taskweaver",level:2},{value:"Conclusion",id:"conclusion",level:2}];function h(e){const n={a:"a",admonition:"admonition",code:"code",h2:"h2",p:"p",pre:"pre",...(0,a.R)(),...e.components};return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(n.admonition,{type:"info",children:(0,o.jsxs)(n.p,{children:["The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver\nif you have customized the examples for the planner and code interpreter.\nThe issue is easy to fix by changing the examples to the new schema.\nPlease refer to the ",(0,o.jsx)(n.a,{href:"/blog/local_llm#how-we-implemented-the-constrained-generation-in-taskweaver",children:"How we 
implemented the constrained generation in TaskWeaver"})," section for more details."]})}),"\n",(0,o.jsx)(n.h2,{id:"motivation",children:"Motivation"}),"\n",(0,o.jsxs)(n.p,{children:["We've seen many raised issues complaining that it is difficult to run TaskWeaver\nwith locally deployed non-that-large language models (LLMs), such as 7b or 13b.\nWhen we examine the issues, we find that the main problem is that the models failed\nto generate responses following our formatting instructions in the prompt. For instance,\nwe see that the planner's response does not contain a ",(0,o.jsx)(n.code,{children:"send_to"})," field, which is required\nto determine the recipient of the message."]}),"\n",(0,o.jsx)(n.p,{children:"In the past, we have tried to address this issue by adding more examples in the prompt,\nwhich however did not work well, especially for these relatively small models. Another idea\nwas to ask the model to re-generate the response if it does not follow the format.\nWe include the format error in the prompt to help the model understand the error and\ncorrect it. However, this approach also did not work well."}),"\n",(0,o.jsx)(n.h2,{id:"constrained-generation",children:"Constrained Generation"}),"\n",(0,o.jsxs)(n.p,{children:['Recently, we discovered a new approach called "Constrained Generation" that can enforce\nthe model to generate responses following the format. Popular frameworks include ',(0,o.jsx)(n.a,{href:"https://github.com/outlines-dev/outlines",children:"Outlines"}),",\n",(0,o.jsx)(n.a,{href:"https://github.com/guidance-ai/guidance",children:"Guidance"}),", ",(0,o.jsx)(n.a,{href:"https://github.com/noamgat/lm-format-enforcer/tree/main",children:"lm-format-enforcer"}),", etc.\nAll these frameworks support generating responses following a specific format, e.g., a JSON schema.\nThis makes it possible to control the output format by providing it a schema."]}),"\n",(0,o.jsxs)(n.p,{children:["In TaskWeaver, a relatively easy way to integrate this feature is to use a local deployment that supports\nboth constrained generation and OpenAI compatible API, for instance, the ",(0,o.jsx)(n.a,{href:"https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html",children:"vllm"}),".\nThere are other frameworks that support constrained generation, such as llama.cpp.\nBut currently, we found that this feature is still not mature enough, so we start with vllm for experimentation."]}),"\n",(0,o.jsxs)(n.p,{children:["To run vllm, you can follow the instructions in the ",(0,o.jsx)(n.a,{href:"https://docs.vllm.ai/en/stable/serving/openai_compatible_server.html",children:"vllm documentation"}),".\nA simple example is shown below:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-shell",children:"python -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-8B-Instruct --guided-decoding-backend lm-format-enforcer\n"})}),"\n",(0,o.jsxs)(n.p,{children:["where ",(0,o.jsx)(n.code,{children:"--guided-decoding-backend lm-format-enforcer"})," is used to enable the constrained generation feature and\nspecify the backend. 
Currently, vllm only supports ",(0,o.jsx)(n.code,{children:"lm-format-enforcer"})," and ",(0,o.jsx)(n.code,{children:"outlines"}),"."]}),"\n",(0,o.jsx)(n.p,{children:"Here is a sample code to test the vllm server:"}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-python",children:'from openai import OpenAI\n\njson_schema = {\n "type": "object",\n "properties": {\n "country_name": {\n "type": "string"\n }\n },\n "required": ["country_name"]\n}\n\nopenai_api_key = "EMPTY"\nopenai_api_base = "http://localhost:8000/v1"\nclient = OpenAI(\n api_key=openai_api_key,\n base_url=openai_api_base,\n)\ncompletion = client.chat.completions.create(\n model="meta-llama/Meta-Llama-3-8B-Instruct",\n messages = [\n {"role": "system", "content": "You are a helpful assistant."},\n {"role": "user", "content": "Which country is San Francisco in?"}\n ],\n extra_body={\n "guided_json": json_schema,\n "guided_decoding_backend": "lm-format-enforcer"\n } \n)\nprint("Completion result:", completion)\n'})}),"\n",(0,o.jsxs)(n.p,{children:["If you run the above code, you will get the response following the format specified in the ",(0,o.jsx)(n.code,{children:"json_schema"}),"."]}),"\n",(0,o.jsx)(n.p,{children:"After you have successfully deployed vllm, you can set the following configurations in TaskWeaver:"}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-json",children:'{\n "llm.model": "meta-llama/Meta-Llama-3-8B-Instruct",\n "llm.api_base": "http://localhost:8000/v1",\n "llm.api_key": "null",\n "llm.api_type": "openai",\n "llm.openai.require_alternative_roles": false,\n "llm.openai.support_system_role": true\n}\n'})}),"\n",(0,o.jsxs)(n.p,{children:["The ",(0,o.jsx)(n.code,{children:"llm.openai.require_alternative_roles"})," and ",(0,o.jsx)(n.code,{children:"llm.openai.support_system_role"})," configurations are\ndiscussed in the ",(0,o.jsx)(n.a,{href:"/docs/configurations/configurations_in_detail",children:"OpenAI Configuration"})," page.\nWith these configurations, TaskWeaver will send the messages to the vllm server and get the responses."]}),"\n",(0,o.jsx)(n.h2,{id:"how-we-implemented-the-constrained-generation-in-taskweaver",children:"How we implemented the constrained generation in TaskWeaver"}),"\n",(0,o.jsx)(n.p,{children:"In order to support the constrained generation in TaskWeaver, we need to provide the schema to the model.\nTherefore, we made a few changes in the code to support this feature."}),"\n",(0,o.jsxs)(n.p,{children:["First, we add a ",(0,o.jsx)(n.code,{children:"response_json_schema"})," field to the planner and code interpreter. For planner, you can find\nit in ",(0,o.jsx)(n.code,{children:"taskweaver/planner/planner_prompt.py"}),". It looks like this:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:'response_json_schema: |-\n {\n "type": "object",\n "properties": {\n "response": {\n "type": "object",\n "properties": {\n "init_plan": {\n "type": "string"\n },\n "plan": {\n "type": "string"\n },\n "current_plan_step": {\n "type": "string"\n },\n "send_to": {\n "type": "string"\n },\n "message": {\n "type": "string"\n }\n },\n "required": [\n "init_plan",\n "plan",\n "current_plan_step",\n "send_to",\n "message"\n ]\n }\n },\n "required": ["response"]\n }\n'})}),"\n",(0,o.jsxs)(n.p,{children:["If you are familiar with the previous output schema, you may notice that we have changed the ",(0,o.jsx)(n.code,{children:"response"})," field to an object\nfrom an array of elements. 
This is because that it is much easier to express the schema in JSON format if\nthe properties are in an object, not elements in an array."]}),"\n",(0,o.jsxs)(n.p,{children:["Correspondingly, we add a ",(0,o.jsx)(n.code,{children:"response_json_schema"})," field to the code interpreter. You can find it in ",(0,o.jsx)(n.code,{children:"taskweaver/code_interpreter/code_interpreter/code_generator_prompt.py"}),",\nwhich looks like this:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:'response_json_schema: |-\n {\n "type": "object",\n "properties": {\n "response": {\n "type": "object",\n "properties": {\n "thought": {\n "type": "string"\n },\n "reply_type": {\n "type": "string",\n "enum": ["python", "text"]\n },\n "reply_content": {\n "type": "string"\n } \n },\n "required": ["thought", "reply_type", "reply_content"]\n }\n },\n "required": ["response"]\n } \n'})}),"\n",(0,o.jsxs)(n.p,{children:["We also change the ",(0,o.jsx)(n.code,{children:"response"})," field to an object from an array of elements in the code interpreter.\nA benefit of this change is that we can now easily restrict the ",(0,o.jsx)(n.code,{children:"reply_type"})," field to only two values: ",(0,o.jsx)(n.code,{children:"python"})," and ",(0,o.jsx)(n.code,{children:"text"}),",\nwhich is not possible before."]}),"\n",(0,o.jsxs)(n.p,{children:["One consequence of this change is that we need to modify the examples for the code interpreter in order\nto support the new schema. The old examples contain attachments that have the types of\n",(0,o.jsx)(n.code,{children:"python"}),", ",(0,o.jsx)(n.code,{children:"text"}),", and ",(0,o.jsx)(n.code,{children:"sample"}),", which are deprecated. We now need to change them to the new schema.\nSpecifically, we need to change the ",(0,o.jsx)(n.code,{children:"type"})," field to ",(0,o.jsx)(n.code,{children:"reply_type"})," and the ",(0,o.jsx)(n.code,{children:"content"})," field to ",(0,o.jsx)(n.code,{children:"reply_content"}),".\nFor example, the old example:"]}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:'- type: python\n content: |-\n file_path = "/abc/def.txt" \n\n with open(file_path, "r") as file: \n file_contents = file.read() \n print(file_contents)\n'})}),"\n",(0,o.jsx)(n.p,{children:"should be changed to:"}),"\n",(0,o.jsx)(n.pre,{children:(0,o.jsx)(n.code,{className:"language-yaml",children:"- type: reply_type\n content: python # or 'text' if the old type is 'text' or 'sample'\n- type: reply_content\n content: |-\n file_path = \"/abc/def.txt\" \n\n with open(file_path, \"r\") as file: \n file_contents = file.read() \n print(file_contents)\n"})}),"\n",(0,o.jsxs)(n.p,{children:["There could be multiple ",(0,o.jsx)(n.code,{children:"thought"})," attachments in the code interpreter examples.\nBut in the new schema, there is only one ",(0,o.jsx)(n.code,{children:"thought"})," field. 
So we have added code to do the conversion and no\nmanual work is needed to modify the examples.\nIf you have examples, after these changes, we can now support the constrained generation in TaskWeaver."]}),"\n",(0,o.jsxs)(n.p,{children:["Second, we submit the JSON schema to the model when we need to call the endpoint,\nwhich you can find in ",(0,o.jsx)(n.code,{children:"planner.py"})," and ",(0,o.jsx)(n.code,{children:"code_generator.py"}),", respectively."]}),"\n",(0,o.jsx)(n.h2,{id:"conclusion",children:"Conclusion"}),"\n",(0,o.jsx)(n.p,{children:'In this blog post, we have introduced a new feature called "Constrained Generation" that can enforce the model to generate responses following the format.\nWe have also shown how to run TaskWeaver with locally deployed non-that-large language models (LLMs) that support constrained generation.\nWe have also explained how we implemented the constrained generation in TaskWeaver. We hope this feature can help you run TaskWeaver with LLMs more easily.\nIf you have any questions or suggestions, please feel free to contact us.'})]})}function d(e={}){const{wrapper:n}={...(0,a.R)(),...e.components};return n?(0,o.jsx)(n,{...e,children:(0,o.jsx)(h,{...e})}):h(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>r,x:()=>i});var o=t(6540);const a={},s=o.createContext(a);function r(e){const n=o.useContext(s);return o.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function i(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(a):e.components||a:r(e.components),o.createElement(s.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/814f3328.e0b772de.js b/assets/js/814f3328.69d177d4.js similarity index 64% rename from assets/js/814f3328.e0b772de.js rename to assets/js/814f3328.69d177d4.js index c637f8cf..cb987d81 100644 --- a/assets/js/814f3328.e0b772de.js +++ b/assets/js/814f3328.69d177d4.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7472],{5513:e=>{e.exports=JSON.parse('{"title":"Recent posts","items":[{"title":"How to evaluate a LLM agent?","permalink":"/TaskWeaver/blog/evaluation","unlisted":false,"date":"2024-09-11T03:22:47.000Z"},{"title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","permalink":"/TaskWeaver/blog/local_llm","unlisted":false,"date":"2024-09-11T03:22:47.000Z"},{"title":"Plugins In-Depth","permalink":"/TaskWeaver/blog/plugin","unlisted":false,"date":"2024-09-11T03:22:47.000Z"},{"title":"Roles in TaskWeaver","permalink":"/TaskWeaver/blog/role","unlisted":false,"date":"2024-09-11T03:22:47.000Z"}]}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7472],{5513:e=>{e.exports=JSON.parse('{"title":"Recent posts","items":[{"title":"How to evaluate a LLM agent?","permalink":"/TaskWeaver/blog/evaluation","unlisted":false,"date":"2024-09-14T08:42:48.000Z"},{"title":"Run TaskWeaver with Locally Deployed Not-that-Large Language Models","permalink":"/TaskWeaver/blog/local_llm","unlisted":false,"date":"2024-09-14T08:42:48.000Z"},{"title":"Plugins In-Depth","permalink":"/TaskWeaver/blog/plugin","unlisted":false,"date":"2024-09-14T08:42:48.000Z"},{"title":"Roles in TaskWeaver","permalink":"/TaskWeaver/blog/role","unlisted":false,"date":"2024-09-14T08:42:48.000Z"}]}')}}]); \ No newline at end of file diff --git a/assets/js/9fc0d84d.8d35b070.js b/assets/js/9fc0d84d.e4a60a3c.js similarity index 98% rename from assets/js/9fc0d84d.8d35b070.js rename to 
assets/js/9fc0d84d.e4a60a3c.js index fcfdbfa7..3f18c9c1 100644 --- a/assets/js/9fc0d84d.8d35b070.js +++ b/assets/js/9fc0d84d.e4a60a3c.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6011],{9291:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>l,contentTitle:()=>s,default:()=>d,frontMatter:()=>i,metadata:()=>r,toc:()=>h});var o=n(4848),a=n(8453);const i={},s="Run TaskWeaver with Locally Deployed Not-that-Large Language Models",r={permalink:"/TaskWeaver/blog/local_llm",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/local_llm.md",source:"@site/blog/local_llm.md",title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",description:"The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver",date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:5.55,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"How to evaluate a LLM agent?",permalink:"/TaskWeaver/blog/evaluation"},nextItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},l={authorsImageUrls:[]},h=[{value:"Motivation",id:"motivation",level:2}];function c(e){const t={a:"a",admonition:"admonition",code:"code",h2:"h2",p:"p",...(0,a.R)(),...e.components};return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(t.admonition,{type:"info",children:(0,o.jsxs)(t.p,{children:["The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver\nif you have customized the examples for the planner and code interpreter.\nThe issue is easy to fix by changing the examples to the new schema.\nPlease refer to the ",(0,o.jsx)(t.a,{href:"/blog/local_llm#how-we-implemented-the-constrained-generation-in-taskweaver",children:"How we implemented the constrained generation in TaskWeaver"})," section for more details."]})}),"\n",(0,o.jsx)(t.h2,{id:"motivation",children:"Motivation"}),"\n",(0,o.jsxs)(t.p,{children:["We've seen many raised issues complaining that it is difficult to run TaskWeaver\nwith locally deployed non-that-large language models (LLMs), such as 7b or 13b.\nWhen we examine the issues, we find that the main problem is that the models failed\nto generate responses following our formatting instructions in the prompt. For instance,\nwe see that the planner's response does not contain a ",(0,o.jsx)(t.code,{children:"send_to"})," field, which is required\nto determine the recipient of the message."]}),"\n",(0,o.jsx)(t.p,{children:"In the past, we have tried to address this issue by adding more examples in the prompt,\nwhich however did not work well, especially for these relatively small models. Another idea\nwas to ask the model to re-generate the response if it does not follow the format.\nWe include the format error in the prompt to help the model understand the error and\ncorrect it. 
However, this approach also did not work well."})]})}function d(e={}){const{wrapper:t}={...(0,a.R)(),...e.components};return t?(0,o.jsx)(t,{...e,children:(0,o.jsx)(c,{...e})}):c(e)}},8453:(e,t,n)=>{n.d(t,{R:()=>s,x:()=>r});var o=n(6540);const a={},i=o.createContext(a);function s(e){const t=o.useContext(i);return o.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function r(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(a):e.components||a:s(e.components),o.createElement(i.Provider,{value:t},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6011],{9291:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>l,contentTitle:()=>s,default:()=>d,frontMatter:()=>i,metadata:()=>r,toc:()=>h});var o=n(4848),a=n(8453);const i={},s="Run TaskWeaver with Locally Deployed Not-that-Large Language Models",r={permalink:"/TaskWeaver/blog/local_llm",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/local_llm.md",source:"@site/blog/local_llm.md",title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",description:"The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver",date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:5.55,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"How to evaluate a LLM agent?",permalink:"/TaskWeaver/blog/evaluation"},nextItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},l={authorsImageUrls:[]},h=[{value:"Motivation",id:"motivation",level:2}];function c(e){const t={a:"a",admonition:"admonition",code:"code",h2:"h2",p:"p",...(0,a.R)(),...e.components};return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsx)(t.admonition,{type:"info",children:(0,o.jsxs)(t.p,{children:["The feature introduced in this blog post can cause incompatibility issue with the previous version of TaskWeaver\nif you have customized the examples for the planner and code interpreter.\nThe issue is easy to fix by changing the examples to the new schema.\nPlease refer to the ",(0,o.jsx)(t.a,{href:"/blog/local_llm#how-we-implemented-the-constrained-generation-in-taskweaver",children:"How we implemented the constrained generation in TaskWeaver"})," section for more details."]})}),"\n",(0,o.jsx)(t.h2,{id:"motivation",children:"Motivation"}),"\n",(0,o.jsxs)(t.p,{children:["We've seen many raised issues complaining that it is difficult to run TaskWeaver\nwith locally deployed non-that-large language models (LLMs), such as 7b or 13b.\nWhen we examine the issues, we find that the main problem is that the models failed\nto generate responses following our formatting instructions in the prompt. For instance,\nwe see that the planner's response does not contain a ",(0,o.jsx)(t.code,{children:"send_to"})," field, which is required\nto determine the recipient of the message."]}),"\n",(0,o.jsx)(t.p,{children:"In the past, we have tried to address this issue by adding more examples in the prompt,\nwhich however did not work well, especially for these relatively small models. Another idea\nwas to ask the model to re-generate the response if it does not follow the format.\nWe include the format error in the prompt to help the model understand the error and\ncorrect it. 
However, this approach also did not work well."})]})}function d(e={}){const{wrapper:t}={...(0,a.R)(),...e.components};return t?(0,o.jsx)(t,{...e,children:(0,o.jsx)(c,{...e})}):c(e)}},8453:(e,t,n)=>{n.d(t,{R:()=>s,x:()=>r});var o=n(6540);const a={},i=o.createContext(a);function s(e){const t=o.useContext(i);return o.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function r(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(a):e.components||a:s(e.components),o.createElement(i.Provider,{value:t},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/a27d32e8.e190951e.js b/assets/js/a27d32e8.1da90e14.js similarity index 99% rename from assets/js/a27d32e8.e190951e.js rename to assets/js/a27d32e8.1da90e14.js index 8ddfb305..481bade0 100644 --- a/assets/js/a27d32e8.e190951e.js +++ b/assets/js/a27d32e8.1da90e14.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[9395],{2340:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>h,contentTitle:()=>s,default:()=>c,frontMatter:()=>o,metadata:()=>i,toc:()=>l});var a=n(4848),r=n(8453);const o={},s="Roles in TaskWeaver",i={permalink:"/TaskWeaver/blog/role",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/role.md",source:"@site/blog/role.md",title:"Roles in TaskWeaver",description:'We frame TaskWeaver as a code-first agent framework. The term "code-first" means that the agent is designed to',date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:6.15,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},h={authorsImageUrls:[]},l=[];function d(e){const t={a:"a",code:"code",em:"em",mermaid:"mermaid",p:"p",pre:"pre",strong:"strong",...(0,r.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsxs)(t.p,{children:["We frame TaskWeaver as a ",(0,a.jsx)(t.strong,{children:"code-first"})," agent framework. The term \"code-first\" means that the agent is designed to\nconvert the user's request into one or multiple runnable code snippets and then execute them to generate the response.\nThe philosophy behind this design is to consider programming languages as the de facto language for communication in cyber-physical systems,\njust like the natural language for human communication. Therefore, TaskWeaver translates the user's request in natural language into\nprogramming languages, which can be executed by the system to perform the desired tasks."]}),"\n",(0,a.jsxs)(t.p,{children:["Under this design, when the developer needs to extend the agent's capability, they can write a new plugin.\nA plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nLet's consider an example: ",(0,a.jsx)(t.em,{children:"the agent is asked to load a CSV file and perform anomaly detection on the data"}),".\nThe workflow of the agent is in the diagram below. 
It is very natural to represent data to be processed in variables and this task in code snippets."]}),"\n",(0,a.jsx)(t.mermaid,{value:'flowchart TD\n A[User] --"read a.csv and perform \n anomaly detection"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"read a.csv and call the \n anomaly_detection plugin\n to find anomalies in the data"--\x3e C[Code Generator]\n subgraph Code Interpreter\n C --"df=pd.read_csv(\'a.csv\')\n anomaly_df=anomaly_detection(df)"--\x3e D[Code Executor]\n end\n end\n D --result--\x3e B\n B --response--\x3e A'}),"\n",(0,a.jsxs)(t.p,{children:["However, we do find challenges for other tasks that are not naturally represented in code snippets.\nLet's consider another example: ",(0,a.jsx)(t.em,{children:"the agent is asked to read a manual and follow the instructions to process the data"}),".\nWe first assume there is a plugin that can read the manual and extract the instructions, called ",(0,a.jsx)(t.code,{children:"read_manual"}),".\nThe workflow of the agent is in the diagram below.\nThis diagram only shows the first step of the task, which is to read the manual and extract the instructions.\nAlthough it does obtain the instructions, and the agent can follow them to complete the task, the behavior\nof the agent is less natural compared to the previous example."]}),"\n",(0,a.jsx)(t.mermaid,{value:'flowchart TD\n A[User] --"read the manual and follow \n the instructions to process the data"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"step 1: read the manual by \n calling the read_manual \n plugin to extract the instructions"--\x3e C[Code Generator]\n subgraph Code Interpreter\n C --"instructions=read_manual()\n follow_instructions(instructions)"--\x3e D[Code Executor]\n end\n end\n D --instructions--\x3e B'}),"\n",(0,a.jsxs)(t.p,{children:["Why? First, there is no need to generate code to read the manual and extract the instructions.\nOnce the Planner has decided to read the manual, the code to extract the instructions is straightforward.\nEven though that there might be dynamic parts in the code such as some arguments in the function ",(0,a.jsx)(t.code,{children:"read_manual"}),",\nit could be handled by the Planner. 
Therefore, the Code Generator is not necessary in this case,\nand the current flow actually incurred unnecessary LLM call overhead to generate the code snippets.\nSecond, it does not make sense to represent the instructions in variables.\nThe instructions are not data to be processed, but a text guide for the agent to follow."]}),"\n",(0,a.jsxs)(t.p,{children:["For these reasons, we introduced the concept of ",(0,a.jsx)(t.a,{href:"/docs/concepts/role",children:"roles"})," in TaskWeaver.\nRoles are actually not new in TaskWeaver as there are already roles like ",(0,a.jsx)(t.code,{children:"Planner"})," and ",(0,a.jsx)(t.code,{children:"CodeInterpreter"}),".\nTo add a new role, the developer can follow the documentation ",(0,a.jsx)(t.a,{href:"/docs/concepts/role",children:"here"}),".\nIn general, a role is a class that inherits the ",(0,a.jsx)(t.code,{children:"Role"})," class and implements the ",(0,a.jsx)(t.code,{children:"reply"})," method.\nThe ",(0,a.jsx)(t.code,{children:"reply"})," method is the function that the agent calls to interact with the role, which has the\nfollowing signature:"]}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-python",children:"def reply(self, memory: Memory, **kwargs) -> Post:\n # implementation\n"})}),"\n",(0,a.jsxs)(t.p,{children:["It takes the ",(0,a.jsx)(t.code,{children:"memory"})," object, which is the memory of the agent, and returns a ",(0,a.jsx)(t.a,{href:"/docs/concepts/post",children:"Post"})," object, which is the response of the role to the Planner.\nWith the ",(0,a.jsx)(t.code,{children:"memory"})," object, the role can access the history of the conversation and the context of the conversation.\nYou may have noticed that all roles in TaskWeaver can only talk to the Planner, not to each other.\nIf a role needs to talk to another role, it should go through the Planner.\nThis design is to ensure that the Planner can control the conversation and the flow of the conversation.\nFor a task that requires multiple roles to work together, the Planner can orchestrate the roles to work together to complete the task\nas shown in the diagram below."]}),"\n",(0,a.jsx)(t.mermaid,{value:'flowchart TD\n A[User] --"request"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"step 1"--\x3e C[Role 1]\n C --reply--\x3e B\n B --"step 2"--\x3e D[Role 2]\n D --reply--\x3e B\n B --"step 3"--\x3e E[Role 3]\n E --reply--\x3e B\n end\n B --response--\x3e A'}),"\n",(0,a.jsxs)(t.p,{children:["The communication between the Planner and the roles is done through the ",(0,a.jsx)(t.a,{href:"/docs/concepts/post",children:"Post"})," object.\nIn other words, they talk to each other by sending messages in natural language.\nWhat if a role needs to send some data to another role? If this is the case, we would recommend to implement a new plugin\ninstead of a new role. 
Otherwise, you may need to store the data in an external storage like a database and let the other role to access it."]}),"\n",(0,a.jsxs)(t.p,{children:["There is a challenge in implementing multiple roles that is missing information.\nConsider the case in our previous example where the agent is asked to read a manual and follow the instructions to process the data.\nWhen the Planner obtains the instructions from a role called ",(0,a.jsx)(t.code,{children:"manual_reader"}),", it needs to pass the instructions to the CodeInterpreter role to execute the instructions.\nSometimes, the Planner may miss critical information that is needed by the CodeInterpreter role.\nEven though we can emphasize the importance of the Planner to pass all the necessary information to the roles in the prompt,\nit is still possible that the Planner misses some information."]}),"\n",(0,a.jsxs)(t.p,{children:["To address this challenge, we introduce the concept of ",(0,a.jsx)(t.code,{children:"board"})," in TaskWeaver.\nThe ",(0,a.jsx)(t.code,{children:"board"})," is a shared memory space that can be accessed by all roles, which is associated with the current ",(0,a.jsx)(t.a,{href:"/docs/concepts/round",children:"Round"}),".\nThe ",(0,a.jsx)(t.code,{children:"board"})," is a dictionary-like object that can store any information that is needed by the roles.\nEach role can decide to write or read any information from the ",(0,a.jsx)(t.code,{children:"board"}),"."]}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-python",children:' def write_board(self, role_alias: str, bulletin: str) -> None:\n """Add a bulletin to the round."""\n self.board[role_alias] = bulletin\n\ndef read_board(self, role_alias: Optional[str] = None) -> Union[Dict[str, str], str]:\n """Read the bulletin of the round."""\n if role_alias is None:\n return self.board\n return self.board.get(role_alias, None)\n'})}),"\n",(0,a.jsxs)(t.p,{children:["One concrete example of using the ",(0,a.jsx)(t.code,{children:"board"})," is to pass the user's request to the CodeInterpreter role.\nWhen the Planner receives the user's request, it can write the request and its step-wise plan to the ",(0,a.jsx)(t.code,{children:"board"}),".\nThe CodeInterpreter role can then read the request and the plan from the ",(0,a.jsx)(t.code,{children:"board"})," to execute the plan."]}),"\n",(0,a.jsxs)(t.p,{children:["In summary, the concept of roles in TaskWeaver is to provide a way to extend the agent's capability by implementing new roles.\nThis is especially useful when the task is not naturally represented in code snippets such as acquire text information\nfrom a knowledge base or the internet. 
Implementing a new role is straightforward by inheriting the ",(0,a.jsx)(t.code,{children:"Role"})," class and implementing the ",(0,a.jsx)(t.code,{children:"reply"})," method.\nAll extra roles should be put in the ",(0,a.jsx)(t.code,{children:"TaskWeaver/taskweaver/ext_role"})," folder, which will be automatically loaded by TaskWeaver.\nWe have provided a few sample roles in the ",(0,a.jsx)(t.code,{children:"TaskWeaver/taskweaver/ext_role"})," folder, such as the ",(0,a.jsx)(t.code,{children:"Echo"})," role that echoes the user's message back to the user.\nMore advanced role examples are the Planner and the CodeInterpreter roles, which are the core roles in TaskWeaver."]})]})}function c(e={}){const{wrapper:t}={...(0,r.R)(),...e.components};return t?(0,a.jsx)(t,{...e,children:(0,a.jsx)(d,{...e})}):d(e)}},8453:(e,t,n)=>{n.d(t,{R:()=>s,x:()=>i});var a=n(6540);const r={},o=a.createContext(r);function s(e){const t=a.useContext(o);return a.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function i(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(r):e.components||r:s(e.components),a.createElement(o.Provider,{value:t},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[9395],{2340:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>h,contentTitle:()=>s,default:()=>c,frontMatter:()=>o,metadata:()=>i,toc:()=>l});var a=n(4848),r=n(8453);const o={},s="Roles in TaskWeaver",i={permalink:"/TaskWeaver/blog/role",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/role.md",source:"@site/blog/role.md",title:"Roles in TaskWeaver",description:'We frame TaskWeaver as a code-first agent framework. The term "code-first" means that the agent is designed to',date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:6.15,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},h={authorsImageUrls:[]},l=[];function d(e){const t={a:"a",code:"code",em:"em",mermaid:"mermaid",p:"p",pre:"pre",strong:"strong",...(0,r.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsxs)(t.p,{children:["We frame TaskWeaver as a ",(0,a.jsx)(t.strong,{children:"code-first"})," agent framework. The term \"code-first\" means that the agent is designed to\nconvert the user's request into one or multiple runnable code snippets and then execute them to generate the response.\nThe philosophy behind this design is to consider programming languages as the de facto language for communication in cyber-physical systems,\njust like the natural language for human communication. Therefore, TaskWeaver translates the user's request in natural language into\nprogramming languages, which can be executed by the system to perform the desired tasks."]}),"\n",(0,a.jsxs)(t.p,{children:["Under this design, when the developer needs to extend the agent's capability, they can write a new plugin.\nA plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nLet's consider an example: ",(0,a.jsx)(t.em,{children:"the agent is asked to load a CSV file and perform anomaly detection on the data"}),".\nThe workflow of the agent is in the diagram below. 
It is very natural to represent data to be processed in variables and this task in code snippets."]}),"\n",(0,a.jsx)(t.mermaid,{value:'flowchart TD\n A[User] --"read a.csv and perform \n anomaly detection"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"read a.csv and call the \n anomaly_detection plugin\n to find anomalies in the data"--\x3e C[Code Generator]\n subgraph Code Interpreter\n C --"df=pd.read_csv(\'a.csv\')\n anomaly_df=anomaly_detection(df)"--\x3e D[Code Executor]\n end\n end\n D --result--\x3e B\n B --response--\x3e A'}),"\n",(0,a.jsxs)(t.p,{children:["However, we do find challenges for other tasks that are not naturally represented in code snippets.\nLet's consider another example: ",(0,a.jsx)(t.em,{children:"the agent is asked to read a manual and follow the instructions to process the data"}),".\nWe first assume there is a plugin that can read the manual and extract the instructions, called ",(0,a.jsx)(t.code,{children:"read_manual"}),".\nThe workflow of the agent is in the diagram below.\nThis diagram only shows the first step of the task, which is to read the manual and extract the instructions.\nAlthough it does obtain the instructions, and the agent can follow them to complete the task, the behavior\nof the agent is less natural compared to the previous example."]}),"\n",(0,a.jsx)(t.mermaid,{value:'flowchart TD\n A[User] --"read the manual and follow \n the instructions to process the data"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"step 1: read the manual by \n calling the read_manual \n plugin to extract the instructions"--\x3e C[Code Generator]\n subgraph Code Interpreter\n C --"instructions=read_manual()\n follow_instructions(instructions)"--\x3e D[Code Executor]\n end\n end\n D --instructions--\x3e B'}),"\n",(0,a.jsxs)(t.p,{children:["Why? First, there is no need to generate code to read the manual and extract the instructions.\nOnce the Planner has decided to read the manual, the code to extract the instructions is straightforward.\nEven though that there might be dynamic parts in the code such as some arguments in the function ",(0,a.jsx)(t.code,{children:"read_manual"}),",\nit could be handled by the Planner. 
Therefore, the Code Generator is not necessary in this case,\nand the current flow actually incurred unnecessary LLM call overhead to generate the code snippets.\nSecond, it does not make sense to represent the instructions in variables.\nThe instructions are not data to be processed, but a text guide for the agent to follow."]}),"\n",(0,a.jsxs)(t.p,{children:["For these reasons, we introduced the concept of ",(0,a.jsx)(t.a,{href:"/docs/concepts/role",children:"roles"})," in TaskWeaver.\nRoles are actually not new in TaskWeaver as there are already roles like ",(0,a.jsx)(t.code,{children:"Planner"})," and ",(0,a.jsx)(t.code,{children:"CodeInterpreter"}),".\nTo add a new role, the developer can follow the documentation ",(0,a.jsx)(t.a,{href:"/docs/concepts/role",children:"here"}),".\nIn general, a role is a class that inherits the ",(0,a.jsx)(t.code,{children:"Role"})," class and implements the ",(0,a.jsx)(t.code,{children:"reply"})," method.\nThe ",(0,a.jsx)(t.code,{children:"reply"})," method is the function that the agent calls to interact with the role, which has the\nfollowing signature:"]}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-python",children:"def reply(self, memory: Memory, **kwargs) -> Post:\n # implementation\n"})}),"\n",(0,a.jsxs)(t.p,{children:["It takes the ",(0,a.jsx)(t.code,{children:"memory"})," object, which is the memory of the agent, and returns a ",(0,a.jsx)(t.a,{href:"/docs/concepts/post",children:"Post"})," object, which is the response of the role to the Planner.\nWith the ",(0,a.jsx)(t.code,{children:"memory"})," object, the role can access the history of the conversation and the context of the conversation.\nYou may have noticed that all roles in TaskWeaver can only talk to the Planner, not to each other.\nIf a role needs to talk to another role, it should go through the Planner.\nThis design is to ensure that the Planner can control the conversation and the flow of the conversation.\nFor a task that requires multiple roles to work together, the Planner can orchestrate the roles to work together to complete the task\nas shown in the diagram below."]}),"\n",(0,a.jsx)(t.mermaid,{value:'flowchart TD\n A[User] --"request"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"step 1"--\x3e C[Role 1]\n C --reply--\x3e B\n B --"step 2"--\x3e D[Role 2]\n D --reply--\x3e B\n B --"step 3"--\x3e E[Role 3]\n E --reply--\x3e B\n end\n B --response--\x3e A'}),"\n",(0,a.jsxs)(t.p,{children:["The communication between the Planner and the roles is done through the ",(0,a.jsx)(t.a,{href:"/docs/concepts/post",children:"Post"})," object.\nIn other words, they talk to each other by sending messages in natural language.\nWhat if a role needs to send some data to another role? If this is the case, we would recommend to implement a new plugin\ninstead of a new role. 
Otherwise, you may need to store the data in external storage, such as a database, and let the other role access it."]}),"\n",(0,a.jsxs)(t.p,{children:["One challenge in implementing multiple roles is missing information.\nConsider the case in our previous example where the agent is asked to read a manual and follow the instructions to process the data.\nWhen the Planner obtains the instructions from a role called ",(0,a.jsx)(t.code,{children:"manual_reader"}),", it needs to pass the instructions to the CodeInterpreter role to execute them.\nSometimes, the Planner may miss critical information that is needed by the CodeInterpreter role.\nEven though we can emphasize in the prompt that the Planner should pass all the necessary information to the roles,\nit is still possible that the Planner misses some information."]}),"\n",(0,a.jsxs)(t.p,{children:["To address this challenge, we introduce the concept of ",(0,a.jsx)(t.code,{children:"board"})," in TaskWeaver.\nThe ",(0,a.jsx)(t.code,{children:"board"})," is a shared memory space associated with the current ",(0,a.jsx)(t.a,{href:"/docs/concepts/round",children:"Round"}),", which can be accessed by all roles.\nThe ",(0,a.jsx)(t.code,{children:"board"})," is a dictionary-like object that can store any information needed by the roles.\nEach role can decide to write or read any information from the ",(0,a.jsx)(t.code,{children:"board"}),"."]}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{className:"language-python",children:'def write_board(self, role_alias: str, bulletin: str) -> None:\n    """Add a bulletin to the round."""\n    self.board[role_alias] = bulletin\n\ndef read_board(self, role_alias: Optional[str] = None) -> Union[Dict[str, str], str]:\n    """Read the bulletin of the round."""\n    if role_alias is None:\n        return self.board\n    return self.board.get(role_alias, None)\n'})}),"\n",(0,a.jsxs)(t.p,{children:["One concrete example of using the ",(0,a.jsx)(t.code,{children:"board"})," is to pass the user's request to the CodeInterpreter role.\nWhen the Planner receives the user's request, it can write the request and its step-wise plan to the ",(0,a.jsx)(t.code,{children:"board"}),".\nThe CodeInterpreter role can then read the request and the plan from the ",(0,a.jsx)(t.code,{children:"board"})," to execute the plan."]}),"\n",(0,a.jsxs)(t.p,{children:["In summary, the concept of roles in TaskWeaver provides a way to extend the agent's capability.\nThis is especially useful when the task is not naturally represented in code snippets, such as acquiring text information\nfrom a knowledge base or the internet. 
Implementing a new role is straightforward by inheriting the ",(0,a.jsx)(t.code,{children:"Role"})," class and implementing the ",(0,a.jsx)(t.code,{children:"reply"})," method.\nAll extra roles should be put in the ",(0,a.jsx)(t.code,{children:"TaskWeaver/taskweaver/ext_role"})," folder, which will be automatically loaded by TaskWeaver.\nWe have provided a few sample roles in the ",(0,a.jsx)(t.code,{children:"TaskWeaver/taskweaver/ext_role"})," folder, such as the ",(0,a.jsx)(t.code,{children:"Echo"})," role that echoes the user's message back to the user.\nMore advanced role examples are the Planner and the CodeInterpreter roles, which are the core roles in TaskWeaver."]})]})}function c(e={}){const{wrapper:t}={...(0,r.R)(),...e.components};return t?(0,a.jsx)(t,{...e,children:(0,a.jsx)(d,{...e})}):d(e)}},8453:(e,t,n)=>{n.d(t,{R:()=>s,x:()=>i});var a=n(6540);const r={},o=a.createContext(r);function s(e){const t=a.useContext(o);return a.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function i(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(r):e.components||r:s(e.components),a.createElement(o.Provider,{value:t},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/c39bf4d4.dc875663.js b/assets/js/c39bf4d4.e52b9865.js similarity index 98% rename from assets/js/c39bf4d4.dc875663.js rename to assets/js/c39bf4d4.e52b9865.js index 8d3dfa6f..bb832232 100644 --- a/assets/js/c39bf4d4.dc875663.js +++ b/assets/js/c39bf4d4.e52b9865.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7986],{5290:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>h,contentTitle:()=>s,default:()=>c,frontMatter:()=>i,metadata:()=>r,toc:()=>l});var a=n(4848),o=n(8453);const i={},s="How to evaluate a LLM agent?",r={permalink:"/TaskWeaver/blog/evaluation",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/evaluation.md",source:"@site/blog/evaluation.md",title:"How to evaluate a LLM agent?",description:"The challenges",date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:6.305,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,nextItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"}},h={authorsImageUrls:[]},l=[{value:"The challenges",id:"the-challenges",level:2},{value:"A new evaluation method",id:"a-new-evaluation-method",level:2}];function u(e){const t={code:"code",h2:"h2",img:"img",p:"p",pre:"pre",...(0,o.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(t.h2,{id:"the-challenges",children:"The challenges"}),"\n",(0,a.jsx)(t.p,{children:"It is nontrivial to evaluate the performance of a LLM agent.\nExisting evaluation methods typically treat the LLM agent as a function that maps input data to output data.\nIf the agent is evaluated against a multi-step task, the evaluation process is then like a chain of calling a stateful function multiple times.\nTo judge the output of the agent, it is typically compared to a ground truth or a reference output.\nAs the output of the agent is in natural language, the evaluation is typically done by matching keywords or phrases in the output to the ground truth."}),"\n",(0,a.jsx)(t.p,{children:"This evaluation method has its limitations due to its rigid nature.\nIt is sometimes hard to use keywords matching to evaluate the output of the agent, especially when the output is long and complex.\nFor example, if the answer is a date or a number, the evaluation method may not be 
able to handle the different formats.\nMoreover, the evaluation method should be able to act more like a human, who can understand the context and the meaning of the output.\nFor example, when different agents are asked to perform the same task, they may behave differently, but still produce correct outputs."}),"\n",(0,a.jsx)(t.p,{children:"The below example illustrates this point:"}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 1: It is sunny today in New York.\n"})}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 2: Do you want to know the weather in New York today?\nHuman: Yes.\nAgent 2: It is sunny today.\n"})}),"\n",(0,a.jsx)(t.p,{children:'Compared to Agent 1, Agent 2 asks for confirmation before providing the answer, which requires more interaction with the user.\nHowever, both agents provide the correct answer to the question.\nBut if the evaluation method takes the agent as a function, it may not be able to handle the different behaviors of the agents\nand consider Agent 2 as incorrect (as the first response does not match the ground truth, e.g., "sunny").'}),"\n",(0,a.jsx)(t.h2,{id:"a-new-evaluation-method",children:"A new evaluation method"}),"\n",(0,a.jsxs)(t.p,{children:["Therefore, we propose a new evaluation method that treats the agent as a conversational partner as shown in the figure below:\n",(0,a.jsx)(t.img,{alt:"Evaluation",src:n(6805).A+"",width:"965",height:"659"})]})]})}function c(e={}){const{wrapper:t}={...(0,o.R)(),...e.components};return t?(0,a.jsx)(t,{...e,children:(0,a.jsx)(u,{...e})}):u(e)}},6805:(e,t,n)=>{n.d(t,{A:()=>a});const a=n.p+"assets/images/evaluation-ac91a46e949f383154a9ffbafcfbc981.png"},8453:(e,t,n)=>{n.d(t,{R:()=>s,x:()=>r});var a=n(6540);const o={},i=a.createContext(o);function s(e){const t=a.useContext(i);return a.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function r(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(o):e.components||o:s(e.components),a.createElement(i.Provider,{value:t},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[7986],{5290:(e,t,n)=>{n.r(t),n.d(t,{assets:()=>h,contentTitle:()=>s,default:()=>c,frontMatter:()=>i,metadata:()=>r,toc:()=>l});var a=n(4848),o=n(8453);const i={},s="How to evaluate a LLM agent?",r={permalink:"/TaskWeaver/blog/evaluation",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/evaluation.md",source:"@site/blog/evaluation.md",title:"How to evaluate a LLM agent?",description:"The challenges",date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:6.305,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,nextItem:{title:"Run TaskWeaver with Locally Deployed Not-that-Large Language Models",permalink:"/TaskWeaver/blog/local_llm"}},h={authorsImageUrls:[]},l=[{value:"The challenges",id:"the-challenges",level:2},{value:"A new evaluation method",id:"a-new-evaluation-method",level:2}];function u(e){const t={code:"code",h2:"h2",img:"img",p:"p",pre:"pre",...(0,o.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(t.h2,{id:"the-challenges",children:"The challenges"}),"\n",(0,a.jsx)(t.p,{children:"It is nontrivial to evaluate the performance of a LLM agent.\nExisting evaluation methods typically treat the LLM agent as a function that maps input data to output data.\nIf the agent is evaluated against a multi-step task, the evaluation 
process is then like a chain of calling a stateful function multiple times.\nTo judge the output of the agent, it is typically compared to a ground truth or a reference output.\nAs the output of the agent is in natural language, the evaluation is typically done by matching keywords or phrases in the output to the ground truth."}),"\n",(0,a.jsx)(t.p,{children:"This evaluation method has its limitations due to its rigid nature.\nIt is sometimes hard to use keyword matching to evaluate the output of the agent, especially when the output is long and complex.\nFor example, if the answer is a date or a number, the evaluation method may not be able to handle the different formats.\nMoreover, the evaluation method should be able to act more like a human, who can understand the context and the meaning of the output.\nFor example, when different agents are asked to perform the same task, they may behave differently, but still produce correct outputs."}),"\n",(0,a.jsx)(t.p,{children:"The example below illustrates this point:"}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 1: It is sunny today in New York.\n"})}),"\n",(0,a.jsx)(t.pre,{children:(0,a.jsx)(t.code,{children:"Human: What is the weather today?\nAgent 2: Do you want to know the weather in New York today?\nHuman: Yes.\nAgent 2: It is sunny today.\n"})}),"\n",(0,a.jsx)(t.p,{children:'Compared to Agent 1, Agent 2 asks for confirmation before providing the answer, which requires more interaction with the user.\nHowever, both agents provide the correct answer to the question.\nBut if the evaluation method treats the agent as a function, it may not be able to handle the different behaviors of the agents\nand may consider Agent 2 incorrect (as the first response does not match the ground truth, e.g., "sunny").'}),"\n",(0,a.jsx)(t.h2,{id:"a-new-evaluation-method",children:"A new evaluation method"}),"\n",(0,a.jsxs)(t.p,{children:["Therefore, we propose a new evaluation method that treats the agent as a conversational partner as shown in the figure below:\n",(0,a.jsx)(t.img,{alt:"Evaluation",src:n(6805).A+"",width:"965",height:"659"})]})]})}function c(e={}){const{wrapper:t}={...(0,o.R)(),...e.components};return t?(0,a.jsx)(t,{...e,children:(0,a.jsx)(u,{...e})}):u(e)}},6805:(e,t,n)=>{n.d(t,{A:()=>a});const a=n.p+"assets/images/evaluation-ac91a46e949f383154a9ffbafcfbc981.png"},8453:(e,t,n)=>{n.d(t,{R:()=>s,x:()=>r});var a=n(6540);const o={},i=a.createContext(o);function s(e){const t=a.useContext(i);return a.useMemo((function(){return"function"==typeof e?e(t):{...t,...e}}),[t,e])}function r(e){let t;return t=e.disableParentContext?"function"==typeof e.components?e.components(o):e.components||o:s(e.components),a.createElement(i.Provider,{value:t},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/fa48389a.d016b1fd.js b/assets/js/fa48389a.cccf11a1.js similarity index 97% rename from assets/js/fa48389a.d016b1fd.js rename to assets/js/fa48389a.cccf11a1.js index 1ce58854..9a6e742e 100644 --- a/assets/js/fa48389a.d016b1fd.js +++ b/assets/js/fa48389a.cccf11a1.js @@ -1 +1 @@ -"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6563],{2304:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>d,contentTitle:()=>o,default:()=>m,frontMatter:()=>r,metadata:()=>i,toc:()=>c});var a=t(4848),s=t(8453);const r={},o="Roles in TaskWeaver",i={permalink:"/TaskWeaver/blog/role",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/role.md",source:"@site/blog/role.md",title:"Roles 
in TaskWeaver",description:'We frame TaskWeaver as a code-first agent framework. The term "code-first" means that the agent is designed to',date:"2024-09-11T03:22:47.000Z",tags:[],readingTime:6.15,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},d={authorsImageUrls:[]},c=[];function l(e){const n={em:"em",mermaid:"mermaid",p:"p",strong:"strong",...(0,s.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsxs)(n.p,{children:["We frame TaskWeaver as a ",(0,a.jsx)(n.strong,{children:"code-first"})," agent framework. The term \"code-first\" means that the agent is designed to\nconvert the user's request into one or multiple runnable code snippets and then execute them to generate the response.\nThe philosophy behind this design is to consider programming languages as the de facto language for communication in cyber-physical systems,\njust like the natural language for human communication. Therefore, TaskWeaver translates the user's request in natural language into\nprogramming languages, which can be executed by the system to perform the desired tasks."]}),"\n",(0,a.jsxs)(n.p,{children:["Under this design, when the developer needs to extend the agent's capability, they can write a new plugin.\nA plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nLet's consider an example: ",(0,a.jsx)(n.em,{children:"the agent is asked to load a CSV file and perform anomaly detection on the data"}),".\nThe workflow of the agent is in the diagram below. It is very natural to represent data to be processed in variables and this task in code snippets."]}),"\n",(0,a.jsx)(n.mermaid,{value:'flowchart TD\n A[User] --"read a.csv and perform \n anomaly detection"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"read a.csv and call the \n anomaly_detection plugin\n to find anomalies in the data"--\x3e C[Code Generator]\n subgraph Code Interpreter\n C --"df=pd.read_csv(\'a.csv\')\n anomaly_df=anomaly_detection(df)"--\x3e D[Code Executor]\n end\n end\n D --result--\x3e B\n B --response--\x3e A'})]})}function m(e={}){const{wrapper:n}={...(0,s.R)(),...e.components};return n?(0,a.jsx)(n,{...e,children:(0,a.jsx)(l,{...e})}):l(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>o,x:()=>i});var a=t(6540);const s={},r=a.createContext(s);function o(e){const n=a.useContext(r);return a.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function i(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:o(e.components),a.createElement(r.Provider,{value:n},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[6563],{2304:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>d,contentTitle:()=>o,default:()=>m,frontMatter:()=>r,metadata:()=>i,toc:()=>c});var a=t(4848),s=t(8453);const r={},o="Roles in TaskWeaver",i={permalink:"/TaskWeaver/blog/role",editUrl:"https://github.com/microsoft/TaskWeaver/tree/main/website/blog/role.md",source:"@site/blog/role.md",title:"Roles in TaskWeaver",description:'We frame TaskWeaver as a code-first agent framework. 
The term "code-first" means that the agent is designed to',date:"2024-09-14T08:42:48.000Z",tags:[],readingTime:6.15,hasTruncateMarker:!0,authors:[],frontMatter:{},unlisted:!1,prevItem:{title:"Plugins In-Depth",permalink:"/TaskWeaver/blog/plugin"}},d={authorsImageUrls:[]},c=[];function l(e){const n={em:"em",mermaid:"mermaid",p:"p",strong:"strong",...(0,s.R)(),...e.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsxs)(n.p,{children:["We frame TaskWeaver as a ",(0,a.jsx)(n.strong,{children:"code-first"})," agent framework. The term \"code-first\" means that the agent is designed to\nconvert the user's request into one or multiple runnable code snippets and then execute them to generate the response.\nThe philosophy behind this design is to consider programming languages as the de facto language for communication in cyber-physical systems,\njust like the natural language for human communication. Therefore, TaskWeaver translates the user's request in natural language into\nprogramming languages, which can be executed by the system to perform the desired tasks."]}),"\n",(0,a.jsxs)(n.p,{children:["Under this design, when the developer needs to extend the agent's capability, they can write a new plugin.\nA plugin is a piece of code wrapped in a class that can be called as a function by the agent in the generated code snippets.\nLet's consider an example: ",(0,a.jsx)(n.em,{children:"the agent is asked to load a CSV file and perform anomaly detection on the data"}),".\nThe workflow of the agent is in the diagram below. It is very natural to represent data to be processed in variables and this task in code snippets."]}),"\n",(0,a.jsx)(n.mermaid,{value:'flowchart TD\n A[User] --"read a.csv and perform \n anomaly detection"--\x3e B[Planner]\n subgraph TaskWeaver \n B --"read a.csv and call the \n anomaly_detection plugin\n to find anomalies in the data"--\x3e C[Code Generator]\n subgraph Code Interpreter\n C --"df=pd.read_csv(\'a.csv\')\n anomaly_df=anomaly_detection(df)"--\x3e D[Code Executor]\n end\n end\n D --result--\x3e B\n B --response--\x3e A'})]})}function m(e={}){const{wrapper:n}={...(0,s.R)(),...e.components};return n?(0,a.jsx)(n,{...e,children:(0,a.jsx)(l,{...e})}):l(e)}},8453:(e,n,t)=>{t.d(n,{R:()=>o,x:()=>i});var a=t(6540);const s={},r=a.createContext(s);function o(e){const n=a.useContext(r);return a.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function i(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:o(e.components),a.createElement(r.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/main.883c2795.js b/assets/js/main.883c2795.js new file mode 100644 index 00000000..164328ee --- /dev/null +++ b/assets/js/main.883c2795.js @@ -0,0 +1,2 @@ +/*! 
For license information please see main.883c2795.js.LICENSE.txt */ +(self.webpackChunkwebsite=self.webpackChunkwebsite||[]).push([[8792],{5391:(e,t,n)=>{"use strict";function r(e){var t,n,a="";if("string"==typeof e||"number"==typeof e)a+=e;else if("object"==typeof e)if(Array.isArray(e))for(t=0;ta});const a=function(){for(var e,t,n=0,a="";n{"use strict";n.d(t,{A:()=>p});n(6540);var r=n(3259),a=n.n(r),o=n(4054);const i={"03e8cedb":[()=>n.e(9213).then(n.bind(n,9269)),"@site/docs/usage/cmd.md",9269],"044bc5cf":[()=>n.e(6366).then(n.bind(n,2417)),"@site/docs/llms/groq.md",2417],"0c5435fe":[()=>n.e(1500).then(n.bind(n,9155)),"@site/docs/concepts/session.md",9155],"138e0e15":[()=>n.e(4921).then(n.t.bind(n,1597,19)),"@generated/@easyops-cn/docusaurus-search-local/default/__plugin.json",1597],"13e97266":[()=>n.e(1218).then(n.bind(n,8638)),"@site/docs/concepts/attachment.md",8638],"14eb3368":[()=>Promise.all([n.e(1869),n.e(6969)]).then(n.bind(n,5847)),"@theme/DocCategoryGeneratedIndexPage",5847],"1531ba9d":[()=>n.e(6489).then(n.bind(n,1591)),"@site/blog/plugin.md",1591],17896441:[()=>Promise.all([n.e(1869),n.e(7203),n.e(8401)]).then(n.bind(n,833)),"@theme/DocItem",833],"1a4e3797":[()=>Promise.all([n.e(1869),n.e(2138)]).then(n.bind(n,1283)),"@theme/SearchPage",1283],"1bff86ef":[()=>n.e(2317).then(n.bind(n,6486)),"@site/blog/evaluation.md",6486],"1cc2dcef":[()=>n.e(4950).then(n.t.bind(n,4124,19)),"@generated/docusaurus-plugin-content-blog/default/p/task-weaver-blog-archive-46e.json",4124],"1db64337":[()=>n.e(8413).then(n.bind(n,3022)),"@site/docs/overview.md",3022],"1f391b9e":[()=>Promise.all([n.e(1869),n.e(7203),n.e(6061)]).then(n.bind(n,7973)),"@theme/MDXPage",7973],"2114d3cd":[()=>n.e(625).then(n.bind(n,5666)),"@site/docs/advanced/compression.md",5666],"223de7e7":[()=>n.e(8862).then(n.bind(n,9321)),"@site/blog/plugin.md?truncated=true",9321],"2644c4f4":[()=>n.e(4508).then(n.bind(n,8876)),"@site/docs/llms/glm.md",8876],"2726c345":[()=>n.e(229).then(n.bind(n,2443)),"@site/docs/advanced/telemetry.md",2443],"30613cee":[()=>n.e(8473).then(n.bind(n,5719)),"@site/docs/advanced/cli_only.md",5719],"360ca471":[()=>n.e(7580).then(n.bind(n,3009)),"@site/docs/customization/plugin/plugin_intro.md",3009],"36994c47":[()=>n.e(9858).then(n.t.bind(n,5516,19)),"@generated/docusaurus-plugin-content-blog/default/__plugin.json",5516],"393be207":[()=>n.e(4134).then(n.bind(n,633)),"@site/src/pages/markdown-page.md",633],"3e02b86d":[()=>n.e(421).then(n.bind(n,8857)),"@site/blog/local_llm.md",8857],"45fd52fa":[()=>n.e(3450).then(n.bind(n,4149)),"@site/docs/advanced/verification.md",4149],"47284eb1":[()=>n.e(7360).then(n.bind(n,990)),"@site/docs/usage/library.md",990],"55efe1e1":[()=>n.e(8932).then(n.bind(n,6122)),"@site/docs/llms/aoai.md",6122],"5a1935a3":[()=>n.e(5359).then(n.bind(n,2248)),"@site/docs/configurations/configurations_in_detail.md",2248],"5e95c892":[()=>n.e(9647).then(n.bind(n,7121)),"@theme/DocsRoot",7121],"5e9f5e1a":[()=>Promise.resolve().then(n.bind(n,4784)),"@generated/docusaurus.config",4784],"72cb6e7f":[()=>n.e(303).then(n.bind(n,2393)),"@site/docs/usage/docker.md",2393],"7555bb16":[()=>n.e(2944).then(n.bind(n,9074)),"@site/docs/llms/customized_llm_api.md",9074],"7fc9262a":[()=>n.e(4607).then(n.bind(n,45)),"@site/docs/llms/liteLLM.md",45],"8070e160":[()=>n.e(3822).then(n.bind(n,7495)),"@site/docs/quickstart.md",7495],"814f3328":[()=>n.e(7472).then(n.t.bind(n,5513,19)),"~blog/default/blog-post-list-prop-default.json",5513],"8257ffa4":[()=>n.e(1906).then(n.bind(n,4916)),"@site/docs/concepts/app.md",4916],"83
4e34cc":[()=>n.e(2018).then(n.bind(n,1034)),"@site/docs/configurations/overview.md",1034],"85be924b":[()=>n.e(8874).then(n.bind(n,568)),"@site/docs/concepts/plugin.md",568],"873331c9":[()=>n.e(7562).then(n.bind(n,5499)),"@site/docs/customization/experience.md",5499],"88f45e24":[()=>n.e(4117).then(n.bind(n,1577)),"@site/docs/code_execution.md",1577],"93a501ed":[()=>Promise.all([n.e(1869),n.e(3555)]).then(n.bind(n,255)),"@site/docs/customization/index.md",255],"9bc17760":[()=>n.e(6302).then(n.bind(n,254)),"@site/docs/advanced/plugin_only.md",254],"9e4087bc":[()=>n.e(2711).then(n.bind(n,9331)),"@theme/BlogArchivePage",9331],"9fc0d84d":[()=>n.e(6011).then(n.bind(n,9291)),"@site/blog/local_llm.md?truncated=true",9291],a0385f53:[()=>n.e(9964).then(n.bind(n,2679)),"@site/docs/concepts/round.md",2679],a27d32e8:[()=>n.e(9395).then(n.bind(n,2340)),"@site/blog/role.md",2340],a417478a:[()=>n.e(6006).then(n.bind(n,5608)),"@site/docs/advanced/plugin_selection.md",5608],a4259125:[()=>Promise.all([n.e(1869),n.e(9502)]).then(n.bind(n,3570)),"@site/docs/llms/index.md",3570],a55bc7d4:[()=>n.e(3446).then(n.bind(n,3802)),"@site/docs/llms/openai.md",3802],a6aa9e1f:[()=>Promise.all([n.e(1869),n.e(7203),n.e(8793),n.e(7643)]).then(n.bind(n,2052)),"@theme/BlogListPage",2052],a7456010:[()=>n.e(1235).then(n.t.bind(n,8552,19)),"@generated/docusaurus-plugin-content-pages/default/__plugin.json",8552],a7bd4aaa:[()=>n.e(7098).then(n.bind(n,4532)),"@theme/DocVersionRoot",4532],a94703ab:[()=>Promise.all([n.e(1869),n.e(9048)]).then(n.bind(n,1377)),"@theme/DocRoot",1377],a9f7b4d5:[()=>n.e(6015).then(n.bind(n,8327)),"@site/docs/concepts/role.md",8327],aba21aa0:[()=>n.e(5742).then(n.t.bind(n,7093,19)),"@generated/docusaurus-plugin-content-docs/default/__plugin.json",7093],acecf23e:[()=>n.e(1903).then(n.t.bind(n,1912,19)),"~blog/default/blogMetadata-default.json",1912],ad895e75:[()=>n.e(9315).then(n.bind(n,2645)),"@site/docs/FAQ.md",2645],ae863774:[()=>n.e(9764).then(n.bind(n,3780)),"@site/docs/customization/example/example.md",3780],b21ad4a1:[()=>n.e(1547).then(n.bind(n,546)),"@site/docs/customization/plugin/how_to_develop_a_new_plugin.md",546],b3e09ff4:[()=>n.e(6618).then(n.bind(n,5684)),"@site/docs/concepts/conversation.md",5684],b6f402ca:[()=>n.e(3463).then(n.t.bind(n,9420,19)),"@generated/docusaurus-plugin-content-docs/default/p/task-weaver-docs-customization-21a.json",9420],be4af720:[()=>n.e(3498).then(n.bind(n,3763)),"@site/docs/llms/qwen.md",3763],c1128000:[()=>n.e(626).then(n.bind(n,298)),"@site/docs/llms/anthropic.md",298],c39bf4d4:[()=>n.e(7986).then(n.bind(n,5290)),"@site/blog/evaluation.md?truncated=true",5290],c4f5d8e4:[()=>Promise.all([n.e(1869),n.e(2634)]).then(n.bind(n,6467)),"@site/src/pages/index.js",6467],ccc49370:[()=>Promise.all([n.e(1869),n.e(7203),n.e(8793),n.e(3249)]).then(n.bind(n,1477)),"@theme/BlogPostPage",1477],cf09775e:[()=>n.e(2733).then(n.bind(n,9259)),"@site/docs/llms/gemini.md",9259],d3234990:[()=>n.e(3899).then(n.bind(n,5529)),"@site/docs/llms/multi-llm.md",5529],d5184954:[()=>n.e(9567).then(n.t.bind(n,1839,19)),"@generated/docusaurus-plugin-content-docs/default/p/task-weaver-docs-usage-090.json",1839],dfbc6587:[()=>n.e(492).then(n.t.bind(n,1053,19)),"@generated/docusaurus-plugin-content-docs/default/p/task-weaver-docs-concepts-616.json",1053],dfcda4d1:[()=>n.e(8050).then(n.bind(n,5925)),"@site/docs/usage/webui.md",5925],e269b8a3:[()=>n.e(1011).then(n.t.bind(n,7102,19)),"@generated/docusaurus-plugin-content-docs/default/p/task-weaver-docs-llms-deb.json",7102],e5d94c03:[()=>n.e(3980).then(n.t.bi
nd(n,8468,19)),"@generated/docusaurus-plugin-content-blog/default/p/task-weaver-blog-6d0.json",8468],e76a15a6:[()=>n.e(7519).then(n.t.bind(n,8007,19)),"@generated/docusaurus-plugin-content-docs/default/p/task-weaver-docs-a5c.json",8007],e8e13c91:[()=>n.e(2106).then(n.bind(n,4982)),"@site/docs/customization/plugin/multi_yaml_single_impl.md",4982],f04cdb7e:[()=>n.e(1592).then(n.bind(n,9290)),"@site/docs/llms/ollama.md",9290],f0ac6a4f:[()=>n.e(9600).then(n.bind(n,7235)),"@site/docs/llms/Keywords-AI.md",7235],fa377e30:[()=>n.e(4431).then(n.bind(n,8314)),"@site/docs/concepts/project.md",8314],fa48389a:[()=>n.e(6563).then(n.bind(n,2304)),"@site/blog/role.md?truncated=true",2304],fc8fddfe:[()=>n.e(8987).then(n.bind(n,9509)),"@site/docs/concepts/post.md",9509],ff4089d2:[()=>n.e(2148).then(n.t.bind(n,3362,19)),"@generated/docusaurus-plugin-content-docs/default/p/task-weaver-docs-advanced-33e.json",3362]};var s=n(4848);function l(e){let{error:t,retry:n,pastDelay:r}=e;return t?(0,s.jsxs)("div",{style:{textAlign:"center",color:"#fff",backgroundColor:"#fa383e",borderColor:"#fa383e",borderStyle:"solid",borderRadius:"0.25rem",borderWidth:"1px",boxSizing:"border-box",display:"block",padding:"1rem",flex:"0 0 50%",marginLeft:"25%",marginRight:"25%",marginTop:"5rem",maxWidth:"50%",width:"100%"},children:[(0,s.jsx)("p",{children:String(t)}),(0,s.jsx)("div",{children:(0,s.jsx)("button",{type:"button",onClick:n,children:"Retry"})})]}):r?(0,s.jsx)("div",{style:{display:"flex",justifyContent:"center",alignItems:"center",height:"100vh"},children:(0,s.jsx)("svg",{id:"loader",style:{width:128,height:110,position:"absolute",top:"calc(100vh - 64%)"},viewBox:"0 0 45 45",xmlns:"http://www.w3.org/2000/svg",stroke:"#61dafb",children:(0,s.jsxs)("g",{fill:"none",fillRule:"evenodd",transform:"translate(1 1)",strokeWidth:"2",children:[(0,s.jsxs)("circle",{cx:"22",cy:"22",r:"6",strokeOpacity:"0",children:[(0,s.jsx)("animate",{attributeName:"r",begin:"1.5s",dur:"3s",values:"6;22",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-opacity",begin:"1.5s",dur:"3s",values:"1;0",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-width",begin:"1.5s",dur:"3s",values:"2;0",calcMode:"linear",repeatCount:"indefinite"})]}),(0,s.jsxs)("circle",{cx:"22",cy:"22",r:"6",strokeOpacity:"0",children:[(0,s.jsx)("animate",{attributeName:"r",begin:"3s",dur:"3s",values:"6;22",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-opacity",begin:"3s",dur:"3s",values:"1;0",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-width",begin:"3s",dur:"3s",values:"2;0",calcMode:"linear",repeatCount:"indefinite"})]}),(0,s.jsx)("circle",{cx:"22",cy:"22",r:"8",children:(0,s.jsx)("animate",{attributeName:"r",begin:"0s",dur:"1.5s",values:"6;1;2;3;4;5;6",calcMode:"linear",repeatCount:"indefinite"})})]})})}):null}var c=n(6921),u=n(3102);function d(e,t){if("*"===e)return a()({loading:l,loader:()=>n.e(2237).then(n.bind(n,2237)),modules:["@theme/NotFound"],webpack:()=>[2237],render(e,t){const n=e.default;return(0,s.jsx)(u.W,{value:{plugin:{name:"native",id:"default"}},children:(0,s.jsx)(n,{...t})})}});const r=o[`${e}-${t}`],d={},p=[],f=[],h=(0,c.A)(r);return Object.entries(h).forEach((e=>{let[t,n]=e;const r=i[n];r&&(d[t]=r[0],p.push(r[1]),f.push(r[2]))})),a().Map({loading:l,loader:d,modules:p,webpack:()=>f,render(t,n){const a=JSON.parse(JSON.stringify(r));Object.entries(t).forEach((t=>{let[n,r]=t;const o=r.default;if(!o)throw 
new Error(`The page component at ${e} doesn't have a default export. This makes it impossible to render anything. Consider default-exporting a React component.`);"object"!=typeof o&&"function"!=typeof o||Object.keys(r).filter((e=>"default"!==e)).forEach((e=>{o[e]=r[e]}));let i=a;const s=n.split(".");s.slice(0,-1).forEach((e=>{i=i[e]})),i[s[s.length-1]]=o}));const o=a.__comp;delete a.__comp;const i=a.__context;delete a.__context;const l=a.__props;return delete a.__props,(0,s.jsx)(u.W,{value:i,children:(0,s.jsx)(o,{...a,...l,...n})})}})}const p=[{path:"/TaskWeaver/blog",component:d("/TaskWeaver/blog","b4d"),exact:!0},{path:"/TaskWeaver/blog/archive",component:d("/TaskWeaver/blog/archive","a4b"),exact:!0},{path:"/TaskWeaver/blog/evaluation",component:d("/TaskWeaver/blog/evaluation","be8"),exact:!0},{path:"/TaskWeaver/blog/local_llm",component:d("/TaskWeaver/blog/local_llm","278"),exact:!0},{path:"/TaskWeaver/blog/plugin",component:d("/TaskWeaver/blog/plugin","608"),exact:!0},{path:"/TaskWeaver/blog/role",component:d("/TaskWeaver/blog/role","b00"),exact:!0},{path:"/TaskWeaver/markdown-page",component:d("/TaskWeaver/markdown-page","e6a"),exact:!0},{path:"/TaskWeaver/search",component:d("/TaskWeaver/search","d6f"),exact:!0},{path:"/TaskWeaver/docs",component:d("/TaskWeaver/docs","cf2"),routes:[{path:"/TaskWeaver/docs",component:d("/TaskWeaver/docs","633"),routes:[{path:"/TaskWeaver/docs",component:d("/TaskWeaver/docs","3d9"),routes:[{path:"/TaskWeaver/docs/advanced",component:d("/TaskWeaver/docs/advanced","f90"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/advanced/cli_only",component:d("/TaskWeaver/docs/advanced/cli_only","c53"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/advanced/code_verification",component:d("/TaskWeaver/docs/advanced/code_verification","ef4"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/advanced/compression",component:d("/TaskWeaver/docs/advanced/compression","d7f"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/advanced/plugin_selection",component:d("/TaskWeaver/docs/advanced/plugin_selection","b84"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/advanced/telemetry",component:d("/TaskWeaver/docs/advanced/telemetry","3d4"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/code_execution",component:d("/TaskWeaver/docs/code_execution","7a3"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts",component:d("/TaskWeaver/docs/concepts","6fa"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/app",component:d("/TaskWeaver/docs/concepts/app","050"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/attachment",component:d("/TaskWeaver/docs/concepts/attachment","aff"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/conversation",component:d("/TaskWeaver/docs/concepts/conversation","78b"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/plugin",component:d("/TaskWeaver/docs/concepts/plugin","5c4"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/post",component:d("/TaskWeaver/docs/concepts/post","a46"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/project",component:d("/TaskWeaver/docs/concepts/project","96c"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/role",component:d("/TaskWeaver/docs/concepts/role","694"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/round",component:d("/TaskWeaver/docs/concepts/round","c55"),exact:
!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/concepts/session",component:d("/TaskWeaver/docs/concepts/session","32b"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/configurations/configurations_in_detail",component:d("/TaskWeaver/docs/configurations/configurations_in_detail","851"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/configurations/overview",component:d("/TaskWeaver/docs/configurations/overview","669"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/customization",component:d("/TaskWeaver/docs/customization","0f7"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/customization/",component:d("/TaskWeaver/docs/customization/","6b7"),exact:!0},{path:"/TaskWeaver/docs/customization/example/",component:d("/TaskWeaver/docs/customization/example/","b3c"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/customization/experience",component:d("/TaskWeaver/docs/customization/experience","976"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/FAQ",component:d("/TaskWeaver/docs/FAQ","52f"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms",component:d("/TaskWeaver/docs/llms","bb3"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/",component:d("/TaskWeaver/docs/llms/","c21"),exact:!0},{path:"/TaskWeaver/docs/llms/anthropic",component:d("/TaskWeaver/docs/llms/anthropic","166"),exact:!0},{path:"/TaskWeaver/docs/llms/aoai",component:d("/TaskWeaver/docs/llms/aoai","774"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/customized_llm_api",component:d("/TaskWeaver/docs/llms/customized_llm_api","7ee"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/gemini",component:d("/TaskWeaver/docs/llms/gemini","4b2"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/glm",component:d("/TaskWeaver/docs/llms/glm","e55"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/groq",component:d("/TaskWeaver/docs/llms/groq","8b3"),exact:!0},{path:"/TaskWeaver/docs/llms/Keywords-AI",component:d("/TaskWeaver/docs/llms/Keywords-AI","b46"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/liteLLM",component:d("/TaskWeaver/docs/llms/liteLLM","ccb"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/multi-llm",component:d("/TaskWeaver/docs/llms/multi-llm","648"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/ollama",component:d("/TaskWeaver/docs/llms/ollama","82b"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/openai",component:d("/TaskWeaver/docs/llms/openai","5eb"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/llms/qwen",component:d("/TaskWeaver/docs/llms/qwen","11e"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/overview",component:d("/TaskWeaver/docs/overview","083"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/plugin/how_to_develop_a_new_plugin",component:d("/TaskWeaver/docs/plugin/how_to_develop_a_new_plugin","d2e"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/plugin/multi_yaml_single_impl",component:d("/TaskWeaver/docs/plugin/multi_yaml_single_impl","3bc"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/plugin/plugin_intro",component:d("/TaskWeaver/docs/plugin/plugin_intro","27b"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/plugin/plugin_only",component:d("/TaskWeaver/docs/plugin/plugin_only","d91"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/quickstart",component:d("/TaskWeaver/docs/
quickstart","cdc"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/usage",component:d("/TaskWeaver/docs/usage","439"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/usage/cmd",component:d("/TaskWeaver/docs/usage/cmd","107"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/usage/docker",component:d("/TaskWeaver/docs/usage/docker","20c"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/usage/library",component:d("/TaskWeaver/docs/usage/library","d9a"),exact:!0,sidebar:"documentSidebar"},{path:"/TaskWeaver/docs/usage/webui",component:d("/TaskWeaver/docs/usage/webui","c9f"),exact:!0,sidebar:"documentSidebar"}]}]}]},{path:"/TaskWeaver/",component:d("/TaskWeaver/","82d"),exact:!0},{path:"*",component:d("*")}]},6125:(e,t,n)=>{"use strict";n.d(t,{o:()=>o,x:()=>i});var r=n(6540),a=n(4848);const o=r.createContext(!1);function i(e){let{children:t}=e;const[n,i]=(0,r.useState)(!1);return(0,r.useEffect)((()=>{i(!0)}),[]),(0,a.jsx)(o.Provider,{value:n,children:t})}},7815:(e,t,n)=>{"use strict";var r=n(6540),a=n(5338),o=n(545),i=n(4625),s=n(4784),l=n(8193);const c=[n(119),n(6134),n(6294),n(1043)];var u=n(8328),d=n(6347),p=n(2831),f=n(4848);function h(e){let{children:t}=e;return(0,f.jsx)(f.Fragment,{children:t})}var m=n(5260),g=n(4586),y=n(6025),b=n(6342),v=n(1213),k=n(2131),w=n(4090);const x="default";var S=n(440),_=n(1463);function E(){const{i18n:{currentLocale:e,defaultLocale:t,localeConfigs:n}}=(0,g.A)(),r=(0,k.o)(),a=n[e].htmlLang,o=e=>e.replace("-","_");return(0,f.jsxs)(m.A,{children:[Object.entries(n).map((e=>{let[t,{htmlLang:n}]=e;return(0,f.jsx)("link",{rel:"alternate",href:r.createUrl({locale:t,fullyQualified:!0}),hrefLang:n},t)})),(0,f.jsx)("link",{rel:"alternate",href:r.createUrl({locale:t,fullyQualified:!0}),hrefLang:"x-default"}),(0,f.jsx)("meta",{property:"og:locale",content:o(a)}),Object.values(n).filter((e=>a!==e.htmlLang)).map((e=>(0,f.jsx)("meta",{property:"og:locale:alternate",content:o(e.htmlLang)},`meta-og-${e.htmlLang}`)))]})}function T(e){let{permalink:t}=e;const{siteConfig:{url:n}}=(0,g.A)(),r=function(){const{siteConfig:{url:e,baseUrl:t,trailingSlash:n}}=(0,g.A)(),{pathname:r}=(0,d.zy)();return e+(0,S.Ks)((0,y.Ay)(r),{trailingSlash:n,baseUrl:t})}(),a=t?`${n}${t}`:r;return(0,f.jsxs)(m.A,{children:[(0,f.jsx)("meta",{property:"og:url",content:a}),(0,f.jsx)("link",{rel:"canonical",href:a})]})}function C(){const{i18n:{currentLocale:e}}=(0,g.A)(),{metadata:t,image:n}=(0,b.p)();return(0,f.jsxs)(f.Fragment,{children:[(0,f.jsxs)(m.A,{children:[(0,f.jsx)("meta",{name:"twitter:card",content:"summary_large_image"}),(0,f.jsx)("body",{className:w.w})]}),n&&(0,f.jsx)(v.be,{image:n}),(0,f.jsx)(T,{}),(0,f.jsx)(E,{}),(0,f.jsx)(_.A,{tag:x,locale:e}),(0,f.jsx)(m.A,{children:t.map(((e,t)=>(0,f.jsx)("meta",{...e},t)))})]})}const A=new Map;var L=n(6125),P=n(6988),O=n(205);function N(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r{const r=t.default?.[e]??t[e];return r?.(...n)}));return()=>a.forEach((e=>e?.()))}const j=function(e){let{children:t,location:n,previousLocation:r}=e;return(0,O.A)((()=>{r!==n&&(!function(e){let{location:t,previousLocation:n}=e;if(!n)return;const r=t.pathname===n.pathname,a=t.hash===n.hash,o=t.search===n.search;if(r&&a&&!o)return;const{hash:i}=t;if(i){const e=decodeURIComponent(i.substring(1)),t=document.getElementById(e);t?.scrollIntoView()}else window.scrollTo(0,0)}({location:n,previousLocation:r}),N("onRouteDidUpdate",{previousLocation:r,location:n}))}),[r,n]),t};function R(e){const t=Array.from(new 
Set([e,decodeURI(e)])).map((e=>(0,p.u)(u.A,e))).flat();return Promise.all(t.map((e=>e.route.component.preload?.())))}class I extends r.Component{previousLocation;routeUpdateCleanupCb;constructor(e){super(e),this.previousLocation=null,this.routeUpdateCleanupCb=l.A.canUseDOM?N("onRouteUpdate",{previousLocation:null,location:this.props.location}):()=>{},this.state={nextRouteHasLoaded:!0}}shouldComponentUpdate(e,t){if(e.location===this.props.location)return t.nextRouteHasLoaded;const n=e.location;return this.previousLocation=this.props.location,this.setState({nextRouteHasLoaded:!1}),this.routeUpdateCleanupCb=N("onRouteUpdate",{previousLocation:this.previousLocation,location:n}),R(n.pathname).then((()=>{this.routeUpdateCleanupCb(),this.setState({nextRouteHasLoaded:!0})})).catch((e=>{console.warn(e),window.location.reload()})),!1}render(){const{children:e,location:t}=this.props;return(0,f.jsx)(j,{previousLocation:this.previousLocation,location:t,children:(0,f.jsx)(d.qh,{location:t,render:()=>e})})}}const F=I,D="__docusaurus-base-url-issue-banner-container",M="__docusaurus-base-url-issue-banner",z="__docusaurus-base-url-issue-banner-suggestion-container";function B(e){return`\ndocument.addEventListener('DOMContentLoaded', function maybeInsertBanner() {\n var shouldInsert = typeof window['docusaurus'] === 'undefined';\n shouldInsert && insertBanner();\n});\n\nfunction insertBanner() {\n var bannerContainer = document.createElement('div');\n bannerContainer.id = '${D}';\n var bannerHtml = ${JSON.stringify(function(e){return`\n
\n

Your Docusaurus site did not load properly.

\n

A very common reason is a wrong site baseUrl configuration.

\n

Current configured baseUrl = ${e} ${"/"===e?" (default value)":""}

\n

We suggest trying baseUrl =

\n
\n`}(e)).replace(/{let{route:t}=e;return!0===t.exact})))return A.set(e.pathname,e.pathname),e;const t=e.pathname.trim().replace(/(?:\/index)?\.html$/,"")||"/";return A.set(e.pathname,t),{...e,pathname:t}}((0,d.zy)());return(0,f.jsx)(F,{location:e,children:q})}function K(){return(0,f.jsx)(H.A,{children:(0,f.jsx)(P.l,{children:(0,f.jsxs)(L.x,{children:[(0,f.jsxs)(h,{children:[(0,f.jsx)(U,{}),(0,f.jsx)(C,{}),(0,f.jsx)($,{}),(0,f.jsx)(G,{})]}),(0,f.jsx)(V,{})]})})})}var Y=n(4054);const Z=function(e){try{return document.createElement("link").relList.supports(e)}catch{return!1}}("prefetch")?function(e){return new Promise(((t,n)=>{if("undefined"==typeof document)return void n();const r=document.createElement("link");r.setAttribute("rel","prefetch"),r.setAttribute("href",e),r.onload=()=>t(),r.onerror=()=>n();const a=document.getElementsByTagName("head")[0]??document.getElementsByName("script")[0]?.parentNode;a?.appendChild(r)}))}:function(e){return new Promise(((t,n)=>{const r=new XMLHttpRequest;r.open("GET",e,!0),r.withCredentials=!0,r.onload=()=>{200===r.status?t():n()},r.send(null)}))};var X=n(6921);const J=new Set,ee=new Set,te=()=>navigator.connection?.effectiveType.includes("2g")||navigator.connection?.saveData,ne={prefetch:e=>{if(!(e=>!te()&&!ee.has(e)&&!J.has(e))(e))return!1;J.add(e);const t=(0,p.u)(u.A,e).flatMap((e=>{return t=e.route.path,Object.entries(Y).filter((e=>{let[n]=e;return n.replace(/-[^-]+$/,"")===t})).flatMap((e=>{let[,t]=e;return Object.values((0,X.A)(t))}));var t}));return Promise.all(t.map((e=>{const t=n.gca(e);return t&&!t.includes("undefined")?Z(t).catch((()=>{})):Promise.resolve()})))},preload:e=>!!(e=>!te()&&!ee.has(e))(e)&&(ee.add(e),R(e))},re=Object.freeze(ne);function ae(e){let{children:t}=e;return"hash"===s.default.future.experimental_router?(0,f.jsx)(i.I9,{children:t}):(0,f.jsx)(i.Kd,{children:t})}const oe=Boolean(!0);if(l.A.canUseDOM){window.docusaurus=re;const e=document.getElementById("__docusaurus"),t=(0,f.jsx)(o.vd,{children:(0,f.jsx)(ae,{children:(0,f.jsx)(K,{})})}),n=(e,t)=>{console.error("Docusaurus React Root onRecoverableError:",e,t)},i=()=>{if(window.docusaurusRoot)window.docusaurusRoot.render(t);else if(oe)window.docusaurusRoot=a.hydrateRoot(e,t,{onRecoverableError:n});else{const r=a.createRoot(e,{onRecoverableError:n});r.render(t),window.docusaurusRoot=r}};R(window.location.pathname).then((()=>{(0,r.startTransition)(i)}))}},6988:(e,t,n)=>{"use strict";n.d(t,{o:()=>d,l:()=>p});var r=n(6540),a=n(4784);const 
o=JSON.parse('{"docusaurus-plugin-content-docs":{"default":{"path":"/TaskWeaver/docs","versions":[{"name":"current","label":"Next","isLast":true,"path":"/TaskWeaver/docs","mainDocId":"overview","docs":[{"id":"advanced/cli_only","path":"/TaskWeaver/docs/advanced/cli_only","sidebar":"documentSidebar"},{"id":"advanced/code_verification","path":"/TaskWeaver/docs/advanced/code_verification","sidebar":"documentSidebar"},{"id":"advanced/compression","path":"/TaskWeaver/docs/advanced/compression","sidebar":"documentSidebar"},{"id":"advanced/plugin_only","path":"/TaskWeaver/docs/plugin/plugin_only","sidebar":"documentSidebar"},{"id":"advanced/plugin_selection","path":"/TaskWeaver/docs/advanced/plugin_selection","sidebar":"documentSidebar"},{"id":"advanced/telemetry","path":"/TaskWeaver/docs/advanced/telemetry","sidebar":"documentSidebar"},{"id":"code_execution","path":"/TaskWeaver/docs/code_execution","sidebar":"documentSidebar"},{"id":"concepts/app","path":"/TaskWeaver/docs/concepts/app","sidebar":"documentSidebar"},{"id":"concepts/attachment","path":"/TaskWeaver/docs/concepts/attachment","sidebar":"documentSidebar"},{"id":"concepts/conversation","path":"/TaskWeaver/docs/concepts/conversation","sidebar":"documentSidebar"},{"id":"concepts/plugin","path":"/TaskWeaver/docs/concepts/plugin","sidebar":"documentSidebar"},{"id":"concepts/post","path":"/TaskWeaver/docs/concepts/post","sidebar":"documentSidebar"},{"id":"concepts/project","path":"/TaskWeaver/docs/concepts/project","sidebar":"documentSidebar"},{"id":"concepts/role","path":"/TaskWeaver/docs/concepts/role","sidebar":"documentSidebar"},{"id":"concepts/round","path":"/TaskWeaver/docs/concepts/round","sidebar":"documentSidebar"},{"id":"concepts/session","path":"/TaskWeaver/docs/concepts/session","sidebar":"documentSidebar"},{"id":"configurations/configurations_in_detail","path":"/TaskWeaver/docs/configurations/configurations_in_detail","sidebar":"documentSidebar"},{"id":"configurations/overview","path":"/TaskWeaver/docs/configurations/overview","sidebar":"documentSidebar"},{"id":"customization/example/example","path":"/TaskWeaver/docs/customization/example/","sidebar":"documentSidebar"},{"id":"customization/experience","path":"/TaskWeaver/docs/customization/experience","sidebar":"documentSidebar"},{"id":"customization/index","path":"/TaskWeaver/docs/customization/"},{"id":"customization/plugin/develop_plugin","path":"/TaskWeaver/docs/plugin/how_to_develop_a_new_plugin","sidebar":"documentSidebar"},{"id":"customization/plugin/multi_yaml_single_impl","path":"/TaskWeaver/docs/plugin/multi_yaml_single_impl","sidebar":"documentSidebar"},{"id":"customization/plugin/plugin_intro","path":"/TaskWeaver/docs/plugin/plugin_intro","sidebar":"documentSidebar"},{"id":"FAQ","path":"/TaskWeaver/docs/FAQ","sidebar":"documentSidebar"},{"id":"llms/anthropic","path":"/TaskWeaver/docs/llms/anthropic"},{"id":"llms/aoai","path":"/TaskWeaver/docs/llms/aoai","sidebar":"documentSidebar"},{"id":"llms/customized_llm_api","path":"/TaskWeaver/docs/llms/customized_llm_api","sidebar":"documentSidebar"},{"id":"llms/gemini","path":"/TaskWeaver/docs/llms/gemini","sidebar":"documentSidebar"},{"id":"llms/glm","path":"/TaskWeaver/docs/llms/glm","sidebar":"documentSidebar"},{"id":"llms/groq","path":"/TaskWeaver/docs/llms/groq"},{"id":"llms/index","path":"/TaskWeaver/docs/llms/"},{"id":"llms/Keywords-AI","path":"/TaskWeaver/docs/llms/Keywords-AI","sidebar":"documentSidebar"},{"id":"llms/liteLLM","path":"/TaskWeaver/docs/llms/liteLLM","sidebar":"documentSidebar"},{"id":"llms/multi-llm","p
ath":"/TaskWeaver/docs/llms/multi-llm","sidebar":"documentSidebar"},{"id":"llms/ollama","path":"/TaskWeaver/docs/llms/ollama","sidebar":"documentSidebar"},{"id":"llms/openai","path":"/TaskWeaver/docs/llms/openai","sidebar":"documentSidebar"},{"id":"llms/qwen","path":"/TaskWeaver/docs/llms/qwen","sidebar":"documentSidebar"},{"id":"overview","path":"/TaskWeaver/docs/overview","sidebar":"documentSidebar"},{"id":"quickstart","path":"/TaskWeaver/docs/quickstart","sidebar":"documentSidebar"},{"id":"usage/cmd","path":"/TaskWeaver/docs/usage/cmd","sidebar":"documentSidebar"},{"id":"usage/docker","path":"/TaskWeaver/docs/usage/docker","sidebar":"documentSidebar"},{"id":"usage/library","path":"/TaskWeaver/docs/usage/library","sidebar":"documentSidebar"},{"id":"usage/webui","path":"/TaskWeaver/docs/usage/webui","sidebar":"documentSidebar"},{"id":"/concepts","path":"/TaskWeaver/docs/concepts","sidebar":"documentSidebar"},{"id":"/usage","path":"/TaskWeaver/docs/usage","sidebar":"documentSidebar"},{"id":"/llms","path":"/TaskWeaver/docs/llms","sidebar":"documentSidebar"},{"id":"advanced","path":"/TaskWeaver/docs/advanced","sidebar":"documentSidebar"},{"id":"customization","path":"/TaskWeaver/docs/customization","sidebar":"documentSidebar"}],"draftIds":[],"sidebars":{"documentSidebar":{"link":{"path":"/TaskWeaver/docs/overview","label":"overview"}}}}],"breadcrumbs":true}}}'),i=JSON.parse('{"defaultLocale":"en","locales":["en"],"path":"i18n","currentLocale":"en","localeConfigs":{"en":{"label":"English","direction":"ltr","htmlLang":"en","calendar":"gregory","path":"en"}}}');var s=n(2654);const l=JSON.parse('{"docusaurusVersion":"3.5.2","siteVersion":"0.0.0","pluginVersions":{"docusaurus-plugin-content-docs":{"type":"package","name":"@docusaurus/plugin-content-docs","version":"3.5.2"},"docusaurus-plugin-content-blog":{"type":"package","name":"@docusaurus/plugin-content-blog","version":"3.5.2"},"docusaurus-plugin-content-pages":{"type":"package","name":"@docusaurus/plugin-content-pages","version":"3.5.2"},"docusaurus-plugin-sitemap":{"type":"package","name":"@docusaurus/plugin-sitemap","version":"3.5.2"},"docusaurus-theme-classic":{"type":"package","name":"@docusaurus/theme-classic","version":"3.5.2"},"@easyops-cn/docusaurus-search-local":{"type":"package","name":"@easyops-cn/docusaurus-search-local","version":"0.44.5"},"docusaurus-theme-mermaid":{"type":"package","name":"@docusaurus/theme-mermaid","version":"3.5.2"}}}');var c=n(4848);const u={siteConfig:a.default,siteMetadata:l,globalData:o,i18n:i,codeTranslations:s},d=r.createContext(u);function p(e){let{children:t}=e;return(0,c.jsx)(d.Provider,{value:u,children:t})}},7489:(e,t,n)=>{"use strict";n.d(t,{A:()=>m});var r=n(6540),a=n(8193),o=n(5260),i=n(440),s=n(9504),l=n(3102),c=n(4848);function u(e){let{error:t,tryAgain:n}=e;return(0,c.jsxs)("div",{style:{display:"flex",flexDirection:"column",justifyContent:"center",alignItems:"flex-start",minHeight:"100vh",width:"100%",maxWidth:"80ch",fontSize:"20px",margin:"0 auto",padding:"1rem"},children:[(0,c.jsx)("h1",{style:{fontSize:"3rem"},children:"This page crashed"}),(0,c.jsx)("button",{type:"button",onClick:n,style:{margin:"1rem 0",fontSize:"2rem",cursor:"pointer",borderRadius:20,padding:"1rem"},children:"Try again"}),(0,c.jsx)(d,{error:t})]})}function d(e){let{error:t}=e;const n=(0,i.rA)(t).map((e=>e.message)).join("\n\nCause:\n");return(0,c.jsx)("p",{style:{whiteSpace:"pre-wrap"},children:n})}function 
[... minified Docusaurus/webpack theme-runtime chunks omitted: generated vendor code (navbar, search bar, color-mode toggle, footer, announcement bar, storage and i18n helpers), not hand-authored site content; the original text was also damaged by HTML stripping and is not recoverable here ...]
n&&"/"!==h.substr(-1)&&(h+="/"),h};var s=n(1561);function l(e){return"/"===e.charAt(0)?e:"/"+e}function c(e){return"/"===e.charAt(0)?e.substr(1):e}function u(e,t){return function(e,t){return 0===e.toLowerCase().indexOf(t.toLowerCase())&&-1!=="/?#".indexOf(e.charAt(t.length))}(e,t)?e.substr(t.length):e}function d(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e}function p(e){var t=e.pathname,n=e.search,r=e.hash,a=t||"/";return n&&"?"!==n&&(a+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(a+="#"===r.charAt(0)?r:"#"+r),a}function f(e,t,n,a){var o;"string"==typeof e?(o=function(e){var t=e||"/",n="",r="",a=t.indexOf("#");-1!==a&&(r=t.substr(a),t=t.substr(0,a));var o=t.indexOf("?");return-1!==o&&(n=t.substr(o),t=t.substr(0,o)),{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}}(e),o.state=t):(void 0===(o=(0,r.A)({},e)).pathname&&(o.pathname=""),o.search?"?"!==o.search.charAt(0)&&(o.search="?"+o.search):o.search="",o.hash?"#"!==o.hash.charAt(0)&&(o.hash="#"+o.hash):o.hash="",void 0!==t&&void 0===o.state&&(o.state=t));try{o.pathname=decodeURI(o.pathname)}catch(s){throw s instanceof URIError?new URIError('Pathname "'+o.pathname+'" could not be decoded. This is likely caused by an invalid percent-encoding.'):s}return n&&(o.key=n),a?o.pathname?"/"!==o.pathname.charAt(0)&&(o.pathname=i(o.pathname,a.pathname)):o.pathname=a.pathname:o.pathname||(o.pathname="/"),o}function h(){var e=null;var t=[];return{setPrompt:function(t){return e=t,function(){e===t&&(e=null)}},confirmTransitionTo:function(t,n,r,a){if(null!=e){var o="function"==typeof e?e(t,n):e;"string"==typeof o?"function"==typeof r?r(o,a):a(!0):a(!1!==o)}else a(!0)},appendListener:function(e){var n=!0;function r(){n&&e.apply(void 0,arguments)}return t.push(r),function(){n=!1,t=t.filter((function(e){return e!==r}))}},notifyListeners:function(){for(var e=arguments.length,n=new Array(e),r=0;rt?n.splice(t,n.length-t,a):n.push(a),d({action:r,location:a,index:t,entries:n})}}))},replace:function(e,t){var r="REPLACE",a=f(e,t,m(),k.location);u.confirmTransitionTo(a,r,n,(function(e){e&&(k.entries[k.index]=a,d({action:r,location:a}))}))},go:v,goBack:function(){v(-1)},goForward:function(){v(1)},canGo:function(e){var t=k.index+e;return t>=0&&t{"use strict";var r=n(4363),a={childContextTypes:!0,contextType:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromError:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i={$$typeof:!0,compare:!0,defaultProps:!0,displayName:!0,propTypes:!0,type:!0},s={};function l(e){return r.isMemo(e)?i:s[e.$$typeof]||a}s[r.ForwardRef]={$$typeof:!0,render:!0,defaultProps:!0,displayName:!0,propTypes:!0},s[r.Memo]=i;var c=Object.defineProperty,u=Object.getOwnPropertyNames,d=Object.getOwnPropertySymbols,p=Object.getOwnPropertyDescriptor,f=Object.getPrototypeOf,h=Object.prototype;e.exports=function e(t,n,r){if("string"!=typeof n){if(h){var a=f(n);a&&a!==h&&e(t,a,r)}var i=u(n);d&&(i=i.concat(d(n)));for(var s=l(t),m=l(n),g=0;g{"use strict";e.exports=function(e,t,n,r,a,o,i,s){if(!e){var l;if(void 0===t)l=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,a,o,i,s],u=0;(l=new Error(t.replace(/%s/g,(function(){return c[u++]})))).name="Invariant Violation"}throw l.framesToPop=1,l}}},4634:e=>{e.exports=Array.isArray||function(e){return"[object Array]"==Object.prototype.toString.call(e)}},8291:(e,t,n)=>{var r,a;!function(){var 
o,i,s,l,c,u,d,p,f,h,m,g,y,b,v,k,w,x,S,_,E,T,C,A,L,P,O,N,j,R,I=function(e){var t=new I.Builder;return t.pipeline.add(I.trimmer,I.stopWordFilter,I.stemmer),t.searchPipeline.add(I.stemmer),e.call(t,t),t.build()};I.version="2.3.9",I.utils={},I.utils.warn=(o=this,function(e){o.console&&console.warn&&console.warn(e)}),I.utils.asString=function(e){return null==e?"":e.toString()},I.utils.clone=function(e){if(null==e)return e;for(var t=Object.create(null),n=Object.keys(e),r=0;r0){var l=I.utils.clone(t)||{};l.position=[i,s],l.index=a.length,a.push(new I.Token(n.slice(i,o),l))}i=o+1}}return a},I.tokenizer.separator=/[\s\-]+/,I.Pipeline=function(){this._stack=[]},I.Pipeline.registeredFunctions=Object.create(null),I.Pipeline.registerFunction=function(e,t){t in this.registeredFunctions&&I.utils.warn("Overwriting existing registered function: "+t),e.label=t,I.Pipeline.registeredFunctions[e.label]=e},I.Pipeline.warnIfFunctionNotRegistered=function(e){e.label&&e.label in this.registeredFunctions||I.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},I.Pipeline.load=function(e){var t=new I.Pipeline;return e.forEach((function(e){var n=I.Pipeline.registeredFunctions[e];if(!n)throw new Error("Cannot load unregistered function: "+e);t.add(n)})),t},I.Pipeline.prototype.add=function(){Array.prototype.slice.call(arguments).forEach((function(e){I.Pipeline.warnIfFunctionNotRegistered(e),this._stack.push(e)}),this)},I.Pipeline.prototype.after=function(e,t){I.Pipeline.warnIfFunctionNotRegistered(t);var n=this._stack.indexOf(e);if(-1==n)throw new Error("Cannot find existingFn");n+=1,this._stack.splice(n,0,t)},I.Pipeline.prototype.before=function(e,t){I.Pipeline.warnIfFunctionNotRegistered(t);var n=this._stack.indexOf(e);if(-1==n)throw new Error("Cannot find existingFn");this._stack.splice(n,0,t)},I.Pipeline.prototype.remove=function(e){var t=this._stack.indexOf(e);-1!=t&&this._stack.splice(t,1)},I.Pipeline.prototype.run=function(e){for(var t=this._stack.length,n=0;n1&&(oe&&(n=a),o!=e);)r=n-t,a=t+Math.floor(r/2),o=this.elements[2*a];return o==e||o>e?2*a:os?c+=2:i==s&&(t+=n[l+1]*r[c+1],l+=2,c+=2);return t},I.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},I.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),t=1,n=0;t0){var o,i=a.str.charAt(0);i in a.node.edges?o=a.node.edges[i]:(o=new I.TokenSet,a.node.edges[i]=o),1==a.str.length&&(o.final=!0),r.push({node:o,editsRemaining:a.editsRemaining,str:a.str.slice(1)})}if(0!=a.editsRemaining){if("*"in a.node.edges)var s=a.node.edges["*"];else{s=new I.TokenSet;a.node.edges["*"]=s}if(0==a.str.length&&(s.final=!0),r.push({node:s,editsRemaining:a.editsRemaining-1,str:a.str}),a.str.length>1&&r.push({node:a.node,editsRemaining:a.editsRemaining-1,str:a.str.slice(1)}),1==a.str.length&&(a.node.final=!0),a.str.length>=1){if("*"in a.node.edges)var l=a.node.edges["*"];else{l=new I.TokenSet;a.node.edges["*"]=l}1==a.str.length&&(l.final=!0),r.push({node:l,editsRemaining:a.editsRemaining-1,str:a.str.slice(1)})}if(a.str.length>1){var c,u=a.str.charAt(0),d=a.str.charAt(1);d in a.node.edges?c=a.node.edges[d]:(c=new I.TokenSet,a.node.edges[d]=c),1==a.str.length&&(c.final=!0),r.push({node:c,editsRemaining:a.editsRemaining-1,str:u+a.str.slice(2)})}}}return n},I.TokenSet.fromString=function(e){for(var t=new I.TokenSet,n=t,r=0,a=e.length;r=e;t--){var n=this.uncheckedNodes[t],r=n.child.toString();r in 
this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[r]:(n.child._str=r,this.minimizedNodes[r]=n.child),this.uncheckedNodes.pop()}},I.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},I.Index.prototype.search=function(e){return this.query((function(t){new I.QueryParser(e,t).parse()}))},I.Index.prototype.query=function(e){for(var t=new I.Query(this.fields),n=Object.create(null),r=Object.create(null),a=Object.create(null),o=Object.create(null),i=Object.create(null),s=0;s1?1:e},I.Builder.prototype.k1=function(e){this._k1=e},I.Builder.prototype.add=function(e,t){var n=e[this._ref],r=Object.keys(this._fields);this._documents[n]=t||{},this.documentCount+=1;for(var a=0;a=this.length)return I.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},I.QueryLexer.prototype.width=function(){return this.pos-this.start},I.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},I.QueryLexer.prototype.backup=function(){this.pos-=1},I.QueryLexer.prototype.acceptDigitRun=function(){var e,t;do{t=(e=this.next()).charCodeAt(0)}while(t>47&&t<58);e!=I.QueryLexer.EOS&&this.backup()},I.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(I.QueryLexer.TERM)),e.ignore(),e.more())return I.QueryLexer.lexText},I.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(I.QueryLexer.EDIT_DISTANCE),I.QueryLexer.lexText},I.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(I.QueryLexer.BOOST),I.QueryLexer.lexText},I.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(I.QueryLexer.TERM)},I.QueryLexer.termSeparator=I.tokenizer.separator,I.QueryLexer.lexText=function(e){for(;;){var t=e.next();if(t==I.QueryLexer.EOS)return I.QueryLexer.lexEOS;if(92!=t.charCodeAt(0)){if(":"==t)return I.QueryLexer.lexField;if("~"==t)return e.backup(),e.width()>0&&e.emit(I.QueryLexer.TERM),I.QueryLexer.lexEditDistance;if("^"==t)return e.backup(),e.width()>0&&e.emit(I.QueryLexer.TERM),I.QueryLexer.lexBoost;if("+"==t&&1===e.width())return e.emit(I.QueryLexer.PRESENCE),I.QueryLexer.lexText;if("-"==t&&1===e.width())return e.emit(I.QueryLexer.PRESENCE),I.QueryLexer.lexText;if(t.match(I.QueryLexer.termSeparator))return I.QueryLexer.lexTerm}else e.escapeCharacter()}},I.QueryParser=function(e,t){this.lexer=new I.QueryLexer(e),this.query=t,this.currentClause={},this.lexemeIdx=0},I.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=I.QueryParser.parseClause;e;)e=e(this);return this.query},I.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},I.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},I.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},I.QueryParser.parseClause=function(e){var t=e.peekLexeme();if(null!=t)switch(t.type){case I.QueryLexer.PRESENCE:return I.QueryParser.parsePresence;case I.QueryLexer.FIELD:return I.QueryParser.parseField;case I.QueryLexer.TERM:return I.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+t.type;throw t.str.length>=1&&(n+=" with value '"+t.str+"'"),new I.QueryParseError(n,t.start,t.end)}},I.QueryParser.parsePresence=function(e){var 
t=e.consumeLexeme();if(null!=t){switch(t.str){case"-":e.currentClause.presence=I.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=I.Query.presence.REQUIRED;break;default:var n="unrecognised presence operator'"+t.str+"'";throw new I.QueryParseError(n,t.start,t.end)}var r=e.peekLexeme();if(null==r){n="expecting term or field, found nothing";throw new I.QueryParseError(n,t.start,t.end)}switch(r.type){case I.QueryLexer.FIELD:return I.QueryParser.parseField;case I.QueryLexer.TERM:return I.QueryParser.parseTerm;default:n="expecting term or field, found '"+r.type+"'";throw new I.QueryParseError(n,r.start,r.end)}}},I.QueryParser.parseField=function(e){var t=e.consumeLexeme();if(null!=t){if(-1==e.query.allFields.indexOf(t.str)){var n=e.query.allFields.map((function(e){return"'"+e+"'"})).join(", "),r="unrecognised field '"+t.str+"', possible fields: "+n;throw new I.QueryParseError(r,t.start,t.end)}e.currentClause.fields=[t.str];var a=e.peekLexeme();if(null==a){r="expecting term, found nothing";throw new I.QueryParseError(r,t.start,t.end)}if(a.type===I.QueryLexer.TERM)return I.QueryParser.parseTerm;r="expecting term, found '"+a.type+"'";throw new I.QueryParseError(r,a.start,a.end)}},I.QueryParser.parseTerm=function(e){var t=e.consumeLexeme();if(null!=t){e.currentClause.term=t.str.toLowerCase(),-1!=t.str.indexOf("*")&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(null!=n)switch(n.type){case I.QueryLexer.TERM:return e.nextClause(),I.QueryParser.parseTerm;case I.QueryLexer.FIELD:return e.nextClause(),I.QueryParser.parseField;case I.QueryLexer.EDIT_DISTANCE:return I.QueryParser.parseEditDistance;case I.QueryLexer.BOOST:return I.QueryParser.parseBoost;case I.QueryLexer.PRESENCE:return e.nextClause(),I.QueryParser.parsePresence;default:var r="Unexpected lexeme type '"+n.type+"'";throw new I.QueryParseError(r,n.start,n.end)}else e.nextClause()}},I.QueryParser.parseEditDistance=function(e){var t=e.consumeLexeme();if(null!=t){var n=parseInt(t.str,10);if(isNaN(n)){var r="edit distance must be numeric";throw new I.QueryParseError(r,t.start,t.end)}e.currentClause.editDistance=n;var a=e.peekLexeme();if(null!=a)switch(a.type){case I.QueryLexer.TERM:return e.nextClause(),I.QueryParser.parseTerm;case I.QueryLexer.FIELD:return e.nextClause(),I.QueryParser.parseField;case I.QueryLexer.EDIT_DISTANCE:return I.QueryParser.parseEditDistance;case I.QueryLexer.BOOST:return I.QueryParser.parseBoost;case I.QueryLexer.PRESENCE:return e.nextClause(),I.QueryParser.parsePresence;default:r="Unexpected lexeme type '"+a.type+"'";throw new I.QueryParseError(r,a.start,a.end)}else e.nextClause()}},I.QueryParser.parseBoost=function(e){var t=e.consumeLexeme();if(null!=t){var n=parseInt(t.str,10);if(isNaN(n)){var r="boost must be numeric";throw new I.QueryParseError(r,t.start,t.end)}e.currentClause.boost=n;var a=e.peekLexeme();if(null!=a)switch(a.type){case I.QueryLexer.TERM:return e.nextClause(),I.QueryParser.parseTerm;case I.QueryLexer.FIELD:return e.nextClause(),I.QueryParser.parseField;case I.QueryLexer.EDIT_DISTANCE:return I.QueryParser.parseEditDistance;case I.QueryLexer.BOOST:return I.QueryParser.parseBoost;case I.QueryLexer.PRESENCE:return e.nextClause(),I.QueryParser.parsePresence;default:r="Unexpected lexeme type '"+a.type+"'";throw new I.QueryParseError(r,a.start,a.end)}else e.nextClause()}},void 0===(a="function"==typeof(r=function(){return I})?r.call(t,n,t,e):r)||(e.exports=a)}()},119:(e,t,n)=>{"use strict";n.r(t)},1043:(e,t,n)=>{"use strict";n.r(t)},5947:function(e,t,n){var r,a;r=function(){var 
e,t,n={version:"0.2.0"},r=n.settings={minimum:.08,easing:"ease",positionUsing:"",speed:200,trickle:!0,trickleRate:.02,trickleSpeed:800,showSpinner:!0,barSelector:'[role="bar"]',spinnerSelector:'[role="spinner"]',parent:"body",template:'
'};function a(e,t,n){return en?n:e}function o(e){return 100*(-1+e)}function i(e,t,n){var a;return(a="translate3d"===r.positionUsing?{transform:"translate3d("+o(e)+"%,0,0)"}:"translate"===r.positionUsing?{transform:"translate("+o(e)+"%,0)"}:{"margin-left":o(e)+"%"}).transition="all "+t+"ms "+n,a}n.configure=function(e){var t,n;for(t in e)void 0!==(n=e[t])&&e.hasOwnProperty(t)&&(r[t]=n);return this},n.status=null,n.set=function(e){var t=n.isStarted();e=a(e,r.minimum,1),n.status=1===e?null:e;var o=n.render(!t),c=o.querySelector(r.barSelector),u=r.speed,d=r.easing;return o.offsetWidth,s((function(t){""===r.positionUsing&&(r.positionUsing=n.getPositioningCSS()),l(c,i(e,u,d)),1===e?(l(o,{transition:"none",opacity:1}),o.offsetWidth,setTimeout((function(){l(o,{transition:"all "+u+"ms linear",opacity:0}),setTimeout((function(){n.remove(),t()}),u)}),u)):setTimeout(t,u)})),this},n.isStarted=function(){return"number"==typeof n.status},n.start=function(){n.status||n.set(0);var e=function(){setTimeout((function(){n.status&&(n.trickle(),e())}),r.trickleSpeed)};return r.trickle&&e(),this},n.done=function(e){return e||n.status?n.inc(.3+.5*Math.random()).set(1):this},n.inc=function(e){var t=n.status;return t?("number"!=typeof e&&(e=(1-t)*a(Math.random()*t,.1,.95)),t=a(t+e,0,.994),n.set(t)):n.start()},n.trickle=function(){return n.inc(Math.random()*r.trickleRate)},e=0,t=0,n.promise=function(r){return r&&"resolved"!==r.state()?(0===t&&n.start(),e++,t++,r.always((function(){0==--t?(e=0,n.done()):n.set((e-t)/e)})),this):this},n.render=function(e){if(n.isRendered())return document.getElementById("nprogress");u(document.documentElement,"nprogress-busy");var t=document.createElement("div");t.id="nprogress",t.innerHTML=r.template;var a,i=t.querySelector(r.barSelector),s=e?"-100":o(n.status||0),c=document.querySelector(r.parent);return l(i,{transition:"all 0 linear",transform:"translate3d("+s+"%,0,0)"}),r.showSpinner||(a=t.querySelector(r.spinnerSelector))&&f(a),c!=document.body&&u(c,"nprogress-custom-parent"),c.appendChild(t),t},n.remove=function(){d(document.documentElement,"nprogress-busy"),d(document.querySelector(r.parent),"nprogress-custom-parent");var e=document.getElementById("nprogress");e&&f(e)},n.isRendered=function(){return!!document.getElementById("nprogress")},n.getPositioningCSS=function(){var e=document.body.style,t="WebkitTransform"in e?"Webkit":"MozTransform"in e?"Moz":"msTransform"in e?"ms":"OTransform"in e?"O":"";return t+"Perspective"in e?"translate3d":t+"Transform"in e?"translate":"margin"};var s=function(){var e=[];function t(){var n=e.shift();n&&n(t)}return function(n){e.push(n),1==e.length&&t()}}(),l=function(){var e=["Webkit","O","Moz","ms"],t={};function n(e){return e.replace(/^-ms-/,"ms-").replace(/-([\da-z])/gi,(function(e,t){return t.toUpperCase()}))}function r(t){var n=document.body.style;if(t in n)return t;for(var r,a=e.length,o=t.charAt(0).toUpperCase()+t.slice(1);a--;)if((r=e[a]+o)in n)return r;return t}function a(e){return e=n(e),t[e]||(t[e]=r(e))}function o(e,t,n){t=a(t),e.style[t]=n}return function(e,t){var n,r,a=arguments;if(2==a.length)for(n in t)void 0!==(r=t[n])&&t.hasOwnProperty(n)&&o(e,n,r);else o(e,a[1],a[2])}}();function c(e,t){return("string"==typeof e?e:p(e)).indexOf(" "+t+" ")>=0}function u(e,t){var n=p(e),r=n+t;c(n,t)||(e.className=r.substring(1))}function d(e,t){var n,r=p(e);c(e,t)&&(n=r.replace(" "+t+" "," "),e.className=n.substring(1,n.length-1))}function p(e){return(" "+(e.className||"")+" ").replace(/\s+/gi," ")}function 
f(e){e&&e.parentNode&&e.parentNode.removeChild(e)}return n},void 0===(a="function"==typeof r?r.call(t,n,t,e):r)||(e.exports=a)},7022:()=>{!function(e){var t="\\b(?:BASH|BASHOPTS|BASH_ALIASES|BASH_ARGC|BASH_ARGV|BASH_CMDS|BASH_COMPLETION_COMPAT_DIR|BASH_LINENO|BASH_REMATCH|BASH_SOURCE|BASH_VERSINFO|BASH_VERSION|COLORTERM|COLUMNS|COMP_WORDBREAKS|DBUS_SESSION_BUS_ADDRESS|DEFAULTS_PATH|DESKTOP_SESSION|DIRSTACK|DISPLAY|EUID|GDMSESSION|GDM_LANG|GNOME_KEYRING_CONTROL|GNOME_KEYRING_PID|GPG_AGENT_INFO|GROUPS|HISTCONTROL|HISTFILE|HISTFILESIZE|HISTSIZE|HOME|HOSTNAME|HOSTTYPE|IFS|INSTANCE|JOB|LANG|LANGUAGE|LC_ADDRESS|LC_ALL|LC_IDENTIFICATION|LC_MEASUREMENT|LC_MONETARY|LC_NAME|LC_NUMERIC|LC_PAPER|LC_TELEPHONE|LC_TIME|LESSCLOSE|LESSOPEN|LINES|LOGNAME|LS_COLORS|MACHTYPE|MAILCHECK|MANDATORY_PATH|NO_AT_BRIDGE|OLDPWD|OPTERR|OPTIND|ORBIT_SOCKETDIR|OSTYPE|PAPERSIZE|PATH|PIPESTATUS|PPID|PS1|PS2|PS3|PS4|PWD|RANDOM|REPLY|SECONDS|SELINUX_INIT|SESSION|SESSIONTYPE|SESSION_MANAGER|SHELL|SHELLOPTS|SHLVL|SSH_AUTH_SOCK|TERM|UID|UPSTART_EVENTS|UPSTART_INSTANCE|UPSTART_JOB|UPSTART_SESSION|USER|WINDOWID|XAUTHORITY|XDG_CONFIG_DIRS|XDG_CURRENT_DESKTOP|XDG_DATA_DIRS|XDG_GREETER_DATA_DIR|XDG_MENU_PREFIX|XDG_RUNTIME_DIR|XDG_SEAT|XDG_SEAT_PATH|XDG_SESSION_DESKTOP|XDG_SESSION_ID|XDG_SESSION_PATH|XDG_SESSION_TYPE|XDG_VTNR|XMODIFIERS)\\b",n={pattern:/(^(["']?)\w+\2)[ \t]+\S.*/,lookbehind:!0,alias:"punctuation",inside:null},r={bash:n,environment:{pattern:RegExp("\\$"+t),alias:"constant"},variable:[{pattern:/\$?\(\([\s\S]+?\)\)/,greedy:!0,inside:{variable:[{pattern:/(^\$\(\([\s\S]+)\)\)/,lookbehind:!0},/^\$\(\(/],number:/\b0x[\dA-Fa-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee]-?\d+)?/,operator:/--|\+\+|\*\*=?|<<=?|>>=?|&&|\|\||[=!+\-*/%<>^&|]=?|[?~:]/,punctuation:/\(\(?|\)\)?|,|;/}},{pattern:/\$\((?:\([^)]+\)|[^()])+\)|`[^`]+`/,greedy:!0,inside:{variable:/^\$\(|^`|\)$|`$/}},{pattern:/\$\{[^}]+\}/,greedy:!0,inside:{operator:/:[-=?+]?|[!\/]|##?|%%?|\^\^?|,,?/,punctuation:/[\[\]]/,environment:{pattern:RegExp("(\\{)"+t),lookbehind:!0,alias:"constant"}}},/\$(?:\w+|[#?*!@$])/],entity:/\\(?:[abceEfnrtv\\"]|O?[0-7]{1,3}|U[0-9a-fA-F]{8}|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{1,2})/};e.languages.bash={shebang:{pattern:/^#!\s*\/.*/,alias:"important"},comment:{pattern:/(^|[^"{\\$])#.*/,lookbehind:!0},"function-name":[{pattern:/(\bfunction\s+)[\w-]+(?=(?:\s*\(?:\s*\))?\s*\{)/,lookbehind:!0,alias:"function"},{pattern:/\b[\w-]+(?=\s*\(\s*\)\s*\{)/,alias:"function"}],"for-or-select":{pattern:/(\b(?:for|select)\s+)\w+(?=\s+in\s)/,alias:"variable",lookbehind:!0},"assign-left":{pattern:/(^|[\s;|&]|[<>]\()\w+(?:\.\w+)*(?=\+?=)/,inside:{environment:{pattern:RegExp("(^|[\\s;|&]|[<>]\\()"+t),lookbehind:!0,alias:"constant"}},alias:"variable",lookbehind:!0},parameter:{pattern:/(^|\s)-{1,2}(?:\w+:[+-]?)?\w+(?:\.\w+)*(?=[=\s]|$)/,alias:"variable",lookbehind:!0},string:[{pattern:/((?:^|[^<])<<-?\s*)(\w+)\s[\s\S]*?(?:\r?\n|\r)\2/,lookbehind:!0,greedy:!0,inside:r},{pattern:/((?:^|[^<])<<-?\s*)(["'])(\w+)\2\s[\s\S]*?(?:\r?\n|\r)\3/,lookbehind:!0,greedy:!0,inside:{bash:n}},{pattern:/(^|[^\\](?:\\\\)*)"(?:\\[\s\S]|\$\([^)]+\)|\$(?!\()|`[^`]+`|[^"\\`$])*"/,lookbehind:!0,greedy:!0,inside:r},{pattern:/(^|[^$\\])'[^']*'/,lookbehind:!0,greedy:!0},{pattern:/\$'(?:[^'\\]|\\[\s\S])*'/,greedy:!0,inside:{entity:r.entity}}],environment:{pattern:RegExp("\\$?"+t),alias:"constant"},variable:r.variable,function:{pattern:/(^|[\s;|&]|[<>]\()(?:add|apropos|apt|apt-cache|apt-get|aptitude|aspell|automysqlbackup|awk|basename|bash|bc|bconsole|bg|bzip2|cal|cargo|cat|cfdisk|chgrp|chkconfig|chmod|chown|c
hroot|cksum|clear|cmp|column|comm|composer|cp|cron|crontab|csplit|curl|cut|date|dc|dd|ddrescue|debootstrap|df|diff|diff3|dig|dir|dircolors|dirname|dirs|dmesg|docker|docker-compose|du|egrep|eject|env|ethtool|expand|expect|expr|fdformat|fdisk|fg|fgrep|file|find|fmt|fold|format|free|fsck|ftp|fuser|gawk|git|gparted|grep|groupadd|groupdel|groupmod|groups|grub-mkconfig|gzip|halt|head|hg|history|host|hostname|htop|iconv|id|ifconfig|ifdown|ifup|import|install|ip|java|jobs|join|kill|killall|less|link|ln|locate|logname|logrotate|look|lpc|lpr|lprint|lprintd|lprintq|lprm|ls|lsof|lynx|make|man|mc|mdadm|mkconfig|mkdir|mke2fs|mkfifo|mkfs|mkisofs|mknod|mkswap|mmv|more|most|mount|mtools|mtr|mutt|mv|nano|nc|netstat|nice|nl|node|nohup|notify-send|npm|nslookup|op|open|parted|passwd|paste|pathchk|ping|pkill|pnpm|podman|podman-compose|popd|pr|printcap|printenv|ps|pushd|pv|quota|quotacheck|quotactl|ram|rar|rcp|reboot|remsync|rename|renice|rev|rm|rmdir|rpm|rsync|scp|screen|sdiff|sed|sendmail|seq|service|sftp|sh|shellcheck|shuf|shutdown|sleep|slocate|sort|split|ssh|stat|strace|su|sudo|sum|suspend|swapon|sync|sysctl|tac|tail|tar|tee|time|timeout|top|touch|tr|traceroute|tsort|tty|umount|uname|unexpand|uniq|units|unrar|unshar|unzip|update-grub|uptime|useradd|userdel|usermod|users|uudecode|uuencode|v|vcpkg|vdir|vi|vim|virsh|vmstat|wait|watch|wc|wget|whereis|which|who|whoami|write|xargs|xdg-open|yarn|yes|zenity|zip|zsh|zypper)(?=$|[)\s;|&])/,lookbehind:!0},keyword:{pattern:/(^|[\s;|&]|[<>]\()(?:case|do|done|elif|else|esac|fi|for|function|if|in|select|then|until|while)(?=$|[)\s;|&])/,lookbehind:!0},builtin:{pattern:/(^|[\s;|&]|[<>]\()(?:\.|:|alias|bind|break|builtin|caller|cd|command|continue|declare|echo|enable|eval|exec|exit|export|getopts|hash|help|let|local|logout|mapfile|printf|pwd|read|readarray|readonly|return|set|shift|shopt|source|test|times|trap|type|typeset|ulimit|umask|unalias|unset)(?=$|[)\s;|&])/,lookbehind:!0,alias:"class-name"},boolean:{pattern:/(^|[\s;|&]|[<>]\()(?:false|true)(?=$|[)\s;|&])/,lookbehind:!0},"file-descriptor":{pattern:/\B&\d\b/,alias:"important"},operator:{pattern:/\d?<>|>\||\+=|=[=~]?|!=?|<<[<-]?|[&\d]?>>|\d[<>]&?|[<>][&=]?|&[>&]?|\|[&|]?/,inside:{"file-descriptor":{pattern:/^\d/,alias:"important"}}},punctuation:/\$?\(\(?|\)\)?|\.\.|[{}[\];\\]/,number:{pattern:/(^|\s)(?:[1-9]\d*|0)(?:[.,]\d+)?\b/,lookbehind:!0}},n.inside=e.languages.bash;for(var a=["comment","function-name","for-or-select","assign-left","parameter","string","environment","function","keyword","builtin","boolean","file-descriptor","operator","punctuation","number"],o=r.variable[1].inside,i=0;i{Prism.languages.json={property:{pattern:/(^|[^\\])"(?:\\.|[^\\"\r\n])*"(?=\s*:)/,lookbehind:!0,greedy:!0},string:{pattern:/(^|[^\\])"(?:\\.|[^\\"\r\n])*"(?!\s*:)/,lookbehind:!0,greedy:!0},comment:{pattern:/\/\/.*|\/\*[\s\S]*?(?:\*\/|$)/,greedy:!0},number:/-?\b\d+(?:\.\d+)?(?:e[+-]?\d+)?\b/i,punctuation:/[{}[\],]/,operator:/:/,boolean:/\b(?:false|true)\b/,null:{pattern:/\bnull\b/,alias:"keyword"}},Prism.languages.webmanifest=Prism.languages.json},9700:()=>{!function(e){function t(e,t){return"___"+e.toUpperCase()+t+"___"}Object.defineProperties(e.languages["markup-templating"]={},{buildPlaceholders:{value:function(n,r,a,o){if(n.language===r){var i=n.tokenStack=[];n.code=n.code.replace(a,(function(e){if("function"==typeof o&&!o(e))return e;for(var a,s=i.length;-1!==n.code.indexOf(a=t(r,s));)++s;return 
i[s]=e,a})),n.grammar=e.languages.markup}}},tokenizePlaceholders:{value:function(n,r){if(n.language===r&&n.tokenStack){n.grammar=e.languages[r];var a=0,o=Object.keys(n.tokenStack);!function i(s){for(var l=0;l=o.length);l++){var c=s[l];if("string"==typeof c||c.content&&"string"==typeof c.content){var u=o[a],d=n.tokenStack[u],p="string"==typeof c?c:c.content,f=t(r,u),h=p.indexOf(f);if(h>-1){++a;var m=p.substring(0,h),g=new e.Token(r,e.tokenize(d,n.grammar),"language-"+r,d),y=p.substring(h+f.length),b=[];m&&b.push.apply(b,i([m])),b.push(g),y&&b.push.apply(b,i([y])),"string"==typeof c?s.splice.apply(s,[l,1].concat(b)):c.content=b}}else c.content&&i(c.content)}return s}(n.tokens)}}}})}(Prism)},83:()=>{!function(e){var t=/[*&][^\s[\]{},]+/,n=/!(?:<[\w\-%#;/?:@&=+$,.!~*'()[\]]+>|(?:[a-zA-Z\d-]*!)?[\w\-%#;/?:@&=+$.~*'()]+)?/,r="(?:"+n.source+"(?:[ \t]+"+t.source+")?|"+t.source+"(?:[ \t]+"+n.source+")?)",a=/(?:[^\s\x00-\x08\x0e-\x1f!"#%&'*,\-:>?@[\]`{|}\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]|[?:-])(?:[ \t]*(?:(?![#:])|:))*/.source.replace(//g,(function(){return/[^\s\x00-\x08\x0e-\x1f,[\]{}\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]/.source})),o=/"(?:[^"\\\r\n]|\\.)*"|'(?:[^'\\\r\n]|\\.)*'/.source;function i(e,t){t=(t||"").replace(/m/g,"")+"m";var n=/([:\-,[{]\s*(?:\s<>[ \t]+)?)(?:<>)(?=[ \t]*(?:$|,|\]|\}|(?:[\r\n]\s*)?#))/.source.replace(/<>/g,(function(){return r})).replace(/<>/g,(function(){return e}));return RegExp(n,t)}e.languages.yaml={scalar:{pattern:RegExp(/([\-:]\s*(?:\s<>[ \t]+)?[|>])[ \t]*(?:((?:\r?\n|\r)[ \t]+)\S[^\r\n]*(?:\2[^\r\n]+)*)/.source.replace(/<>/g,(function(){return r}))),lookbehind:!0,alias:"string"},comment:/#.*/,key:{pattern:RegExp(/((?:^|[:\-,[{\r\n?])[ \t]*(?:<>[ \t]+)?)<>(?=\s*:\s)/.source.replace(/<>/g,(function(){return r})).replace(/<>/g,(function(){return"(?:"+a+"|"+o+")"}))),lookbehind:!0,greedy:!0,alias:"atrule"},directive:{pattern:/(^[ \t]*)%.+/m,lookbehind:!0,alias:"important"},datetime:{pattern:i(/\d{4}-\d\d?-\d\d?(?:[tT]|[ \t]+)\d\d?:\d{2}:\d{2}(?:\.\d*)?(?:[ \t]*(?:Z|[-+]\d\d?(?::\d{2})?))?|\d{4}-\d{2}-\d{2}|\d\d?:\d{2}(?::\d{2}(?:\.\d*)?)?/.source),lookbehind:!0,alias:"number"},boolean:{pattern:i(/false|true/.source,"i"),lookbehind:!0,alias:"important"},null:{pattern:i(/null|~/.source,"i"),lookbehind:!0,alias:"important"},string:{pattern:i(o),lookbehind:!0,greedy:!0},number:{pattern:i(/[+-]?(?:0x[\da-f]+|0o[0-7]+|(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?|\.inf|\.nan)/.source,"i"),lookbehind:!0},tag:n,important:t,punctuation:/---|[:[\]{}\-,|>?]|\.\.\./},e.languages.yml=e.languages.yaml}(Prism)},793:(e,t,n)=>{var r={"./prism-bash":7022,"./prism-json":2514,"./prism-yaml":83};function a(e){var t=o(e);return n(t)}function o(e){if(!n.o(r,e)){var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}return r[e]}a.keys=function(){return Object.keys(r)},a.resolve=o,e.exports=a,a.id=793},2694:(e,t,n)=>{"use strict";var r=n(6925);function a(){}function o(){}o.resetWarningCache=a,e.exports=function(){function e(e,t,n,a,o,i){if(i!==r){var s=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw s.name="Invariant Violation",s}}function t(){return e}e.isRequired=e;var n={array:e,bigint:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:o,resetWarningCache:a};return n.PropTypes=n,n}},5556:(e,t,n)=>{e.exports=n(2694)()},6925:e=>{"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},2551:(e,t,n)=>{"use strict";var r=n(6540),a=n(9982);function o(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n