From 139a89723d34274a4dbbd483e84f5f6521354765 Mon Sep 17 00:00:00 2001
From: Omar Solano
Date: Sun, 28 Jul 2024 15:28:50 -0400
Subject: [PATCH] add openai_cookbooks data

---
 scripts/main.py  | 36 +++++++++++++++++++++++++-----------
 scripts/setup.py | 35 +++++++++++++++++++++--------------
 2 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/scripts/main.py b/scripts/main.py
index 00e9d44..4864738 100644
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -10,9 +10,10 @@
     AVAILABLE_SOURCES,
     AVAILABLE_SOURCES_UI,
     CONCURRENCY_COUNT,
-    custom_retriever_llamaindex,
+    custom_retriever_llama_index,
+    custom_retriever_openai_cookbooks,
     custom_retriever_peft,
-    custom_retriever_tf,
+    custom_retriever_transformers,
     custom_retriever_trl,
 )
 
@@ -20,26 +21,31 @@
 def update_query_engine_tools(selected_sources):
     tools = []
     source_mapping = {
-        "HF Transformers": (
-            custom_retriever_tf,
+        "Transformers Docs": (
+            custom_retriever_transformers,
             "Transformers_information",
             """Useful for general questions asking about the artificial intelligence (AI) field. Employ this tool to fetch general information on topics such as language models theory (transformer architectures), tips on prompting, models, quantization, etc.""",
         ),
-        "PEFT": (
+        "PEFT Docs": (
             custom_retriever_peft,
             "PEFT_information",
             """Useful for questions asking about efficient LLM fine-tuning. Employ this tool to fetch information on topics such as LoRA, QLoRA, etc.""",
         ),
-        "TRL": (
+        "TRL Docs": (
             custom_retriever_trl,
             "TRL_information",
             """Useful for questions asking about fine-tuning LLMs with reinforcement learning (RLHF). Includes information about the Supervised Fine-tuning step (SFT), Reward Modeling step (RM), and the Proximal Policy Optimization (PPO) step.""",
         ),
         "LlamaIndex Docs": (
-            custom_retriever_llamaindex,
+            custom_retriever_llama_index,
             "LlamaIndex_information",
             """Useful for questions asking about retrieval augmented generation (RAG) with LLMs and embedding models. It is the documentation of the LlamaIndex framework, includes info about fine-tuning embedding models, building chatbots, and agents with llms, using vector databases, embeddings, information retrieval with cosine similarity or bm25, etc.""",
         ),
+        "OpenAI Cookbooks": (
+            custom_retriever_openai_cookbooks,
+            "openai_cookbooks_info",
+            """Useful for questions asking about accomplishing common tasks with the OpenAI API. Returns example code and guides stored in Jupyter notebooks, including info about ChatGPT GPT actions, OpenAI Assistants API, and How to fine-tune OpenAI's GPT-4o and GPT-4o-mini models with the OpenAI API.""",
+        ),
     }
 
     for source in selected_sources:
@@ -148,9 +154,11 @@ def format_sources(completion) -> str:
         )
         all_documents.append(document)
 
-    documents = "\n".join(all_documents)
-
-    return documents_answer_template.format(documents=documents)
+    if len(all_documents) == 0:
+        return ""
+    else:
+        documents = "\n".join(all_documents)
+        return documents_answer_template.format(documents=documents)
 
 
 def save_completion(completion, history):
@@ -165,7 +173,13 @@ def vote(data: gr.LikeData):
 sources = gr.CheckboxGroup(
     AVAILABLE_SOURCES_UI,
     label="Sources",
-    value=["HF Transformers", "PEFT", "TRL", "LlamaIndex Docs"],
+    value=[
+        "Transformers Docs",
+        "PEFT Docs",
+        "TRL Docs",
+        "LlamaIndex Docs",
+        "OpenAI Cookbooks",
+    ],
     interactive=True,
 )
 model = gr.Dropdown(
diff --git a/scripts/setup.py b/scripts/setup.py
index 689da10..fe7bbf0 100644
--- a/scripts/setup.py
+++ b/scripts/setup.py
@@ -63,15 +63,19 @@ def setup_database(db_collection, dict_file_name):
 
 
 # Setup retrievers
-custom_retriever_tf = setup_database(
+custom_retriever_transformers = setup_database(
     "chroma-db-transformers",
-    "document_dict_tf.pkl",
+    "document_dict_transformers.pkl",
 )
 custom_retriever_peft = setup_database("chroma-db-peft", "document_dict_peft.pkl")
 custom_retriever_trl = setup_database("chroma-db-trl", "document_dict_trl.pkl")
-custom_retriever_llamaindex = setup_database(
-    "chroma-db-llama-index",
-    "document_dict_llamaindex.pkl",
+custom_retriever_llama_index = setup_database(
+    "chroma-db-llama_index",
+    "document_dict_llama_index.pkl",
+)
+custom_retriever_openai_cookbooks = setup_database(
+    "chroma-db-openai_cookbooks",
+    "document_dict_openai_cookbooks.pkl",
 )
 
 # Constants
@@ -79,19 +83,21 @@ def setup_database(db_collection, dict_file_name):
 MONGODB_URI = os.getenv("MONGODB_URI")
 
 AVAILABLE_SOURCES_UI = [
-    "HF Transformers",
-    "PEFT",
-    "TRL",
+    "Transformers Docs",
+    "PEFT Docs",
+    "TRL Docs",
     "LlamaIndex Docs",
+    "OpenAI Cookbooks",
     # "Towards AI Blog",
     # "RAG Course",
 ]
 
 AVAILABLE_SOURCES = [
-    "HF_Transformers",
-    "PEFT",
-    "TRL",
-    "LlamaIndex",
+    "transformers",
+    "peft",
+    "trl",
+    "llama_index",
+    "openai_cookbooks",
     # "towards_ai_blog",
     # "rag_course",
 ]
@@ -103,10 +109,11 @@ def setup_database(db_collection, dict_file_name):
 # )
 
 __all__ = [
-    "custom_retriever_tf",
+    "custom_retriever_transformers",
     "custom_retriever_peft",
     "custom_retriever_trl",
-    "custom_retriever_llamaindex",
+    "custom_retriever_llama_index",
+    "custom_retriever_openai_cookbooks",
     "CONCURRENCY_COUNT",
     "MONGODB_URI",
     "AVAILABLE_SOURCES_UI",