From 40f838db09fe26f24a444ae876fa2c6453a36d65 Mon Sep 17 00:00:00 2001 From: Liqun Li Date: Wed, 9 Oct 2024 16:03:49 +0800 Subject: [PATCH] fix doc mismatching (#422) Co-authored-by: Jack-Q --- auto_eval/ds1000_scripts/README.md | 4 +- taskweaver/llm/openai.py | 5 --- website/blog/experience.md | 28 +++++++++----- website/docs/configurations/overview.md | 15 +++++--- website/docs/llms/aoai.md | 50 +++++++++++++------------ 5 files changed, 57 insertions(+), 45 deletions(-) diff --git a/auto_eval/ds1000_scripts/README.md b/auto_eval/ds1000_scripts/README.md index 7c984d93..f59d97a2 100644 --- a/auto_eval/ds1000_scripts/README.md +++ b/auto_eval/ds1000_scripts/README.md @@ -15,8 +15,8 @@ This directory contains the scripts used to evaluate the performance of the [DS- - metadata.json: the metadata of the test case. - prompt.txt: the composed prompt of the test case. - reference_code.py: the ground truth code. -4. Copy the example files from `ds1000_scritps/planner_examples` to `project/planner_examples` directory; - and the example files from `ds1000_scritps/codeinterpreter_examples` to `project/codeinterpreter_examples` directory. +4. Copy the example files from `ds1000_scripts/planner_examples` to `project/examples/planner_examples` directory; + and the example files from `ds1000_scripts/codeinterpreter_examples` to `project/examples/code_generator_examples` directory. Disable (or discard) the original example files from the project directory. See the notes below for understanding why. 5. Once the test cases are generated, follow the instructions in `auto_eval/README.md` to evaluate the performance of the benchmark. 
diff --git a/taskweaver/llm/openai.py b/taskweaver/llm/openai.py index d4752219..0f386937 100644 --- a/taskweaver/llm/openai.py +++ b/taskweaver/llm/openai.py @@ -51,11 +51,6 @@ def _configure(self) -> None: # openai specific config self.api_version = self._get_str("api_version", "2024-06-01") - self.api_auth_type = self._get_enum( - "api_auth_type", - ["openai", "azure", "azure_ad"], - "openai", - ) is_azure_ad_login = self.api_type == "azure_ad" self.aad_auth_mode = self._get_enum( "aad_auth_mode", diff --git a/website/blog/experience.md b/website/blog/experience.md index 831f281a..5c36e2b5 100644 --- a/website/blog/experience.md +++ b/website/blog/experience.md @@ -98,15 +98,25 @@ def reply(self, memory: Memory, **kwargs: ...) -> Post: In a role that needs to set the experience subdirectory, we can get the experience subdirectory from the shared memory. ```python -exp_sub_paths = memory.get_shared_memory_entries( - entry_type="experience_sub_path", -) - -if exp_sub_paths: - exp_sub_path = exp_sub_paths[0].content -else: - exp_sub_path = "" -selected_experiences = self.role_load_experience(query=query, sub_path=exp_sub_path) +def reply( + self, + memory: Memory, + post_proxy: Optional[PostEventProxy] = None, + prompt_log_path: Optional[str] = None, + **kwargs: ..., + ) -> Post: + ... + rounds = memory.get_role_rounds( + role=self.alias, + include_failure_rounds=False, + ) + + # obtain the query from the last round + query = rounds[-1].post_list[-1].message + + # retrieve the experience based on the query + self.role_load_experience(query=query, memory=memory) + ... ``` :::tip diff --git a/website/docs/configurations/overview.md b/website/docs/configurations/overview.md index 6de552df..aab9d111 100644 --- a/website/docs/configurations/overview.md +++ b/website/docs/configurations/overview.md @@ -24,22 +24,27 @@ The following table lists the parameters in the configuration file: | `logging.log_file` | The name of the log file. 
| `taskweaver.log` | | `logging.log_folder` | The folder to store the log file. | `logs` | | `plugin.base_path` | The folder to store plugins. | `${AppBaseDir}/plugins` | -| `planner.example_base_path` | The folder to store planner examples. | `${AppBaseDir}/planner_examples` | +| `{RoleName}.use_example` | Whether to use the example for the role. | `true` | +| `{RoleName}.example_base_path` | The folder to store the examples for the role. | `${AppBaseDir}/examples/{RoleName}_examples` | +| `{RoleName}.dynamic_example_sub_path` | Whether to enable dynamic example loading based on sub-path. | `false` | +| `{RoleName}.use_experience` | Whether to use experience summarized from the previous chat history for the role. | `false` | +| `{RoleName}.experience_dir` | The folder to store the experience for the role. | `${AppBaseDir}/experience/` | +| `{RoleName}.dynamic_experience_sub_path` | Whether to enable dynamic experience loading based on sub-path. | `false` | | `planner.prompt_compression` | Whether to compress the chat history for planner. | `false` | -| `planner.use_experience` | Whether to use experience summarized from the previous chat history in planner. | `false` | -| `code_generator.example_base_path` | The folder to store code interpreter examples. | `${AppBaseDir}/codeinterpreter_examples` | | `code_generator.prompt_compression` | Whether to compress the chat history for code interpreter. | `false` | | `code_generator.enable_auto_plugin_selection` | Whether to enable auto plugin selection. | `false` | -| `code_generator.use_experience` | Whether to use experience summarized from the previous chat history in code generator. | `false` | | `code_generator.auto_plugin_selection_topk` | The number of auto selected plugins in each round. | `3` | | `session.max_internal_chat_round_num` | The maximum number of internal chat rounds between Planner and Code Interpreter. | `10` | | `session.roles` | The roles included for the conversation. 
| ["planner", "code_interpreter"] | | `round_compressor.rounds_to_compress` | The number of rounds to compress. | `2` | | `round_compressor.rounds_to_retain` | The number of rounds to retain. | `3` | -| `execution_service.kernel_mode` | The mode of the code executor, could be `local` or `container`. | `local` | +| `execution_service.kernel_mode` | The mode of the code executor, could be `local` or `container`. | `container` | :::tip $\{AppBaseDir\} is the project directory. + +$\{RoleName\} is the name of the role, such as `planner` or `code_generator`. In the current implementation, the `code_interpreter` role has all code generation functions +in a "sub-role" named `code_generator`. So, the configuration for the code generation part should be set to `code_generator`. ::: :::tip diff --git a/website/docs/llms/aoai.md b/website/docs/llms/aoai.md index a98f8107..79762cde 100644 --- a/website/docs/llms/aoai.md +++ b/website/docs/llms/aoai.md @@ -8,24 +8,25 @@ description: Using LLMs from OpenAI/AOAI 1. Create an account on [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) and get your API key. 2. Create a new deployment of the model and get the deployment name. 3. 
Add the following to your `taskweaver_config.json` file: -```jsonc showLineNumbers -{ - "llm.api_base":"YOUR_AOAI_ENDPOINT", // in the format of https://.openai.azure.com" - "llm.api_key":"YOUR_API_KEY", - "llm.api_type":"azure", - "llm.auth_mode":"api-key", - "llm.model":"gpt-4-1106-preview", // this is known as deployment_name in Azure OpenAI - "llm.response_format": "json_object" -} -``` + ```jsonc showLineNumbers + { + "llm.api_base":"YOUR_AOAI_ENDPOINT", // in the format of https://<your-resource-name>.openai.azure.com + "llm.api_key":"YOUR_API_KEY", + "llm.api_type":"azure", + "llm.model":"gpt-4-1106-preview", // this is known as deployment_name in Azure OpenAI + "llm.response_format": "json_object", + "llm.azure.api_version": "2024-06-01" + } + ``` -:::info -For model versions or after `1106`, `llm.response_format` can be set to `json_object`. -However, for the earlier models, which do not support JSON response explicitly, `llm.response_format` should be set to `null`. -::: + :::info + For model versions on or after `1106`, `llm.response_format` can be set to `json_object`. + However, for the earlier models, which do not support JSON response explicitly, `llm.response_format` should be set to `null`. + ::: 4. Start TaskWeaver and chat with TaskWeaver. -You can refer to the [Quick Start](../quickstart.md) for more details. + + You can refer to the [Quick Start](../quickstart.md) for more details. ## Using Entra Authentication @@ -33,15 +34,16 @@ [assign the proper Azure RBAC Role](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control) to your account (or service principal). 2. Create a new deployment of the model and get the deployment name. 3. 
Add the following to your `taskweaver_config.json` file: - ```jsonc showLineNumbers - { - "llm.api_base":"YOUR_AOAI_ENDPOINT", // in the format of https://.openai.azure.com" - "llm.api_type":"azure_ad", - "llm.auth_mode":"default_azure_credential", - "llm.model":"gpt-4-1106-preview", // this is known as deployment_name in Azure OpenAI - "llm.response_format": "json_object" - } - ``` + ```jsonc showLineNumbers + { + "llm.api_base":"YOUR_AOAI_ENDPOINT", // in the format of https://<your-resource-name>.openai.azure.com + "llm.api_type":"azure_ad", + "llm.model":"gpt-4-1106-preview", // this is known as deployment_name in Azure OpenAI + "llm.response_format": "json_object", + "llm.azure_ad.api_version": "2024-06-01", + "llm.azure_ad.aad_auth_mode": "default_azure_credential" + } + ``` 4. Install extra dependencies: ```bash pip install azure-identity