Fix crash and improve chat model query construction
radare committed Sep 28, 2023
1 parent d91308a commit a5a042c
Showing 2 changed files with 14 additions and 2 deletions.
14 changes: 13 additions & 1 deletion r2ai/local/interpreter/interpreter.py
@@ -633,6 +633,12 @@ def messages_to_prompt(messages):
                 pass
             return formatted_messages
         elif "uncensor" in self.model.lower():
+            #{'role': 'function', 'name': 'run_code', 'content': 'User decided not to run this code.'}
+            #{'role': 'user', 'content': 'tenis'}
+            #{'content': "\nI'm here to help you with any questions or tasks you have! What can I assist you with today?", 'role': 'assistant'}
+            #{'role': 'user', 'content': "thehre's no purpose on this"}
+            #{'role': 'assistant'}
+            #{'role': 'user', 'content': 'force a crash'}
             self.terminator = "###"
             formatted_messages = ""
             try:
@@ -641,9 +647,15 @@ def messages_to_prompt(messages):
                 formatted_messages = f"### Human: {system_prompt}\n"
                 # formatted_messages = f"/imagine prompt: {system_prompt}\n"
                 for index, item in enumerate(messages[1:]):
-                    role = item['role']
+                    # print(item)
+                    role = item['role']
                     if role == "user":
+                        content = item['content'].strip()
                         formatted_messages += f"### Human: {content}\n"
                     elif role == "assistant":
+                        if 'content' in item:
+                            content = item['content'].strip()
+                            formatted_messages += f"### Assistant: {content}\n"
                 formatted_messages += f"### Assistant: \n"
                 # print("```" + formatted_messages + "```")
             except:
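
The crash this commit fixes shows up in the debug dump added in the first hunk: the chat history can contain a bare {'role': 'assistant'} entry with no 'content' key, and the old loop dereferenced item['content'] unconditionally, raising a KeyError. Below is a minimal standalone sketch of the patched loop (simplified from the diff above, not the actual interpreter.py code; the sample messages are copied from the commented dump):

    # Standalone sketch of the fixed formatting loop. Assumption: simplified
    # from the diff above; the message list mirrors the debug dump comments.
    messages = [
        {'role': 'user', 'content': "thehre's no purpose on this"},
        {'role': 'assistant'},  # no 'content' key: the old code crashed here
        {'role': 'user', 'content': 'force a crash'},
    ]

    formatted_messages = ""
    for item in messages:
        role = item['role']
        if role == "user":
            content = item['content'].strip()
            formatted_messages += f"### Human: {content}\n"
        elif role == "assistant":
            # The fix: only dereference 'content' when the key is present.
            if 'content' in item:
                content = item['content'].strip()
                formatted_messages += f"### Assistant: {content}\n"
    formatted_messages += "### Assistant: \n"
    print(formatted_messages)

Without the 'content' guard, the bare assistant entry aborts prompt construction; with it, the entry is skipped and the prompt still ends with an open "### Assistant:" turn for the model to complete.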
2 changes: 1 addition & 1 deletion r2ai/local/main.py
@@ -25,7 +25,7 @@
 # interpreter.model = "/tmp/model.safetensors"
 # interpreter.model = "TheBloke/CodeLlama-34B-Instruct-GGUF"
 #interpreter.model = "models/models/codellama-34b-instruct.Q2_K.gguf"
-#interpreter.model = "models/models/wizardlm-1.0-uncensored-llama2-13b.Q2_K.gguf"
+interpreter.model = "models/models/wizardlm-1.0-uncensored-llama2-13b.Q2_K.gguf"
 #interpreter.model = "models/models/guanaco-7b-uncensored.Q2_K.gguf"
 #interpreter.model = "models/models/ggml-model-q4_0.gguf" # tinysmall -- very bad results
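
Note: the newly enabled model name contains the substring "uncensored", so prompts for this default model are built by the "uncensor" in self.model.lower() branch patched in interpreter.py above.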
