From a5a042cecc2acb197325361a1f10b7e57fab79ba Mon Sep 17 00:00:00 2001
From: pancake
Date: Thu, 28 Sep 2023 11:25:05 +0200
Subject: [PATCH] Fix crash and improve chat model query construction

---
 r2ai/local/interpreter/interpreter.py | 14 +++++++++++++-
 r2ai/local/main.py                    |  2 +-
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/r2ai/local/interpreter/interpreter.py b/r2ai/local/interpreter/interpreter.py
index 1923e0ac3..55c21686a 100644
--- a/r2ai/local/interpreter/interpreter.py
+++ b/r2ai/local/interpreter/interpreter.py
@@ -633,6 +633,12 @@ def messages_to_prompt(messages):
         pass
       return formatted_messages
     elif "uncensor" in self.model.lower():
+#{'role': 'function', 'name': 'run_code', 'content': 'User decided not to run this code.'}
+#{'role': 'user', 'content': 'tenis'}
+#{'content': "\nI'm here to help you with any questions or tasks you have! What can I assist you with today?", 'role': 'assistant'}
+#{'role': 'user', 'content': "thehre's no purpose on this"}
+#{'role': 'assistant'}
+#{'role': 'user', 'content': 'force a crash'}
       self.terminator = "###"
       formatted_messages = ""
       try:
@@ -641,9 +647,15 @@ def messages_to_prompt(messages):
           formatted_messages = f"### Human: {system_prompt}\n"
 # formatted_messages = f"/imagine prompt: {system_prompt}\n"
         for index, item in enumerate(messages[1:]):
-            role = item['role']
+# print(item)
+          role = item['role']
+          if role == "user":
             content = item['content'].strip()
             formatted_messages += f"### Human: {content}\n"
+          elif role == "assistant":
+            if 'content' in item:
+              content = item['content'].strip()
+              formatted_messages += f"### Assistant: {content}\n"
         formatted_messages += f"### Assistant: \n"
 # print("```" + formatted_messages + "```")
       except:
diff --git a/r2ai/local/main.py b/r2ai/local/main.py
index 81fd701ec..92fb1bcbe 100755
--- a/r2ai/local/main.py
+++ b/r2ai/local/main.py
@@ -25,7 +25,7 @@
 # interpreter.model = "/tmp/model.safetensors"
 # interpreter.model = "TheBloke/CodeLlama-34B-Instruct-GGUF"
 #interpreter.model = "models/models/codellama-34b-instruct.Q2_K.gguf"
-#interpreter.model = "models/models/wizardlm-1.0-uncensored-llama2-13b.Q2_K.gguf"
+interpreter.model = "models/models/wizardlm-1.0-uncensored-llama2-13b.Q2_K.gguf"
 #interpreter.model = "models/models/guanaco-7b-uncensored.Q2_K.gguf"
 #interpreter.model = "models/models/ggml-model-q4_0.gguf" # tinysmall -- very bad results
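
Why this fixes the crash: in the "uncensor" branch of messages_to_prompt, the old code ran item['content'].strip() on every message. The commented-out transcript added at the top of the first hunk documents the crashing session: the {'role': 'assistant'} entry carries no 'content' key, so the lookup raised a KeyError. The patch now branches on the message role, renders user and assistant turns in the "### Human:" / "### Assistant:" format that WizardLM-style models expect (with self.terminator = "###" stopping generation at the next turn marker), and only touches item['content'] when the key is present. The main.py change uncomments the WizardLM 13B GGUF as the active local model, which exercises this code path.

Below is a minimal standalone sketch of the patched prompt builder, for experimenting with the logic outside r2ai. The function name build_uncensored_prompt and the sample system prompt are illustrative assumptions, not part of the r2ai codebase; the message history is taken from the transcript quoted in the patch:

```python
def build_uncensored_prompt(messages):
    """Render an OpenAI-style message list into the '### Human:' /
    '### Assistant:' turn format used by WizardLM-style models."""
    formatted = ""
    # messages[0] is treated as the system prompt, as in the patch.
    system_prompt = messages[0].get('content', '').strip()
    if system_prompt != "":
        formatted = f"### Human: {system_prompt}\n"
    for item in messages[1:]:
        role = item['role']
        if role == "user":
            formatted += f"### Human: {item['content'].strip()}\n"
        elif role == "assistant":
            # Guard for entries like {'role': 'assistant'} that carry
            # no 'content' key: the exact case that used to crash.
            if 'content' in item:
                formatted += f"### Assistant: {item['content'].strip()}\n"
    # Trailing open turn so the model continues as the assistant.
    formatted += "### Assistant: \n"
    return formatted

# The session from the patch comments, ending in a content-less
# assistant entry; the system prompt here is a made-up placeholder.
history = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'function', 'name': 'run_code',
     'content': 'User decided not to run this code.'},
    {'role': 'user', 'content': 'tenis'},
    {'role': 'assistant', 'content': "\nI'm here to help you with any "
     "questions or tasks you have! What can I assist you with today?"},
    {'role': 'user', 'content': "thehre's no purpose on this"},
    {'role': 'assistant'},  # no 'content' key: previously raised KeyError
    {'role': 'user', 'content': 'force a crash'},
]
print(build_uncensored_prompt(history))
```

Note that the sketch, like the patch, silently drops 'function'-role messages from the rendered prompt; whether tool output should be surfaced to the model is left unchanged here.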