diff --git a/README.md b/README.md
index c92156c..f748a8c 100644
--- a/README.md
+++ b/README.md
@@ -40,9 +40,9 @@ poetry install
 
 This will create a virtual environment and install all the necessary dependencies in that environment.
 
-2. Set your [OpenRouter API key](https://openrouter.ai/docs#api-keys) and install the python requirements:
+2. Set your [OpenAI API key](https://platform.openai.com/account/api-keys) and install the python requirements:
 
-`export OPENROUTER_API_KEY=`
+`export OPENAI_API_KEY=`
 
 `pip install -r requirements.txt`
 
diff --git a/gpt_migrate/ai.py b/gpt_migrate/ai.py
index bf24fdd..6bbcf5d 100644
--- a/gpt_migrate/ai.py
+++ b/gpt_migrate/ai.py
@@ -1,41 +1,32 @@
+from langchain.chat_models import ChatOpenAI
+from config import OPENAI_API_KEY
 import os
 import openai
 from utils import parse_code_string
 from litellm import completion
 
-openai.api_base = "https://openrouter.ai/api/v1"
-openai.api_key = os.getenv("OPENROUTER_API_KEY")
+openai.api_key = os.getenv("OPENAI_API_KEY")
 
 class AI:
-    def __init__(self, model="gpt-4-32k", model_provider="openai", modelrouter="openrouter", temperature=0.1, max_tokens=10000):
+    def __init__(self, model='openrouter/openai/gpt-4-32k', temperature=0.1, max_tokens=10000):
         self.temperature = temperature
         self.max_tokens = max_tokens
-        self.model_provider = model_provider
         self.model_name = model
-        self.modelrouter = modelrouter
+        try:
+            _ = ChatOpenAI(model_name=model) # check to see if model is available to user
+        except Exception as e:
+            print(e)
+            self.model_name = "gpt-3.5-turbo"
 
     def write_code(self, prompt):
         message=[{"role": "user", "content": str(prompt)}]
-        if self.modelrouter == "openrouter":
-            response = openai.ChatCompletion.create(
-                model="{}/{}".format(self.model_provider,self.model_name), # Optional (user controls the default)
-                messages=message,
-                stream=False,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature,
-                headers={
-                    "HTTP-Referer": "https://gpt-migrate.com",
-                    "X-Title": "GPT-Migrate",
-                },
-            )
-        else:
-            response = completion(
-                messages=message,
-                stream=False,
-                model=self.model_name,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature
-            )
+        response = completion(
+            messages=message,
+            stream=False,
+            model=self.model_name,
+            max_tokens=self.max_tokens,
+            temperature=self.temperature
+        )
         if response["choices"][0]["message"]["content"].startswith("INSTRUCTIONS:"):
             return ("INSTRUCTIONS:","",response["choices"][0]["message"]["content"][14:])
         else:
@@ -44,32 +35,17 @@ def write_code(self, prompt):
 
 
     def run(self, prompt):
         message=[{"role": "user", "content": str(prompt)}]
-        if self.modelrouter == "openrouter":
-            response = openai.ChatCompletion.create(
-                model="{}/{}".format(self.model_provider,self.model_name), # Optional (user controls the default)
-                messages=message,
-                stream=False,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature,
-                headers={
-                    "HTTP-Referer": "https://gpt-migrate.com",
-                    "X-Title": "GPT-Migrate",
-                },
-            )
-            return response["choices"][0]["message"]["content"]
-        else:
-            response = completion(
-                messages=message,
-                stream=True,
-                model=self.model_name,
-                max_tokens=self.max_tokens,
-                temperature=self.temperature
-            )
-            chat = ""
-            for chunk in response:
-                delta = chunk["choices"][0]["delta"]
-                msg = delta.get("content", "")
-                chat += msg
-            return chat
-
+        response = completion(
+            messages=message,
+            stream=True,
+            model=self.model_name,
+            max_tokens=self.max_tokens,
+            temperature=self.temperature
+        )
+        chat = ""
+        for chunk in response:
+            delta = chunk["choices"][0]["delta"]
+            msg = delta.get("content", "")
+            chat += msg
+        return chat
\ No newline at end of file
diff --git a/gpt_migrate/main.py b/gpt_migrate/main.py
index bc79e63..a3a7221 100644
--- a/gpt_migrate/main.py
+++ b/gpt_migrate/main.py
@@ -31,9 +31,7 @@ def __init__(self, sourcedir, targetdir, sourcelang, targetlang, sourceentry, so
 
 @app.command()
 def main(
-    model: str = typer.Option("gpt-4-32k", help="Large Language Model to be used."),
-    model_provider: str = typer.Option("openai", help="Model provider to be used."),
-    modelrouter: str = typer.Option("openrouter", help="Model router to be used. Options are 'openrouter' or 'litellm'."),
+    model: str = typer.Option('openrouter/openai/gpt-4-32k', help="Large Language Model to be used. Default is 'openrouter/openai/gpt-4-32k'. To use OpenAI directly with your API key, use 'gpt-4-32k'."),
     temperature: float = typer.Option(0, help="Temperature setting for the AI model."),
     sourcedir: str = typer.Option("../benchmarks/flask-nodejs/source", help="Source directory containing the code to be migrated."),
     sourcelang: str = typer.Option(None, help="Source language or framework of the code to be migrated."),
@@ -50,9 +48,7 @@ def main(
 
     ai = AI(
         model=model,
-        model_provider=model_provider,
         temperature=temperature,
-        modelrouter=modelrouter
     )
 
     sourcedir = os.path.abspath(sourcedir)
diff --git a/gpt_migrate/memory/external_dependencies b/gpt_migrate/memory/external_dependencies
index d6d09bc..27ee83a 100644
--- a/gpt_migrate/memory/external_dependencies
+++ b/gpt_migrate/memory/external_dependencies
@@ -1,3 +1,6 @@
-express
+rocket
+serde
+serde_json
 bcrypt
-node-json-db
+rusqlite
+serde_json
diff --git a/gpt_migrate/memory/gpt_migrate/db.js_sigs.json b/gpt_migrate/memory/gpt_migrate/db.js_sigs.json
deleted file mode 100644
index d0f85d9..0000000
--- a/gpt_migrate/memory/gpt_migrate/db.js_sigs.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"signature": "readItems() -> object", "description": "Reads the items from a JSON file and returns them as an object."}, {"signature": "writeItems(groceryItems: object)", "description": "Writes the given grocery items object to a JSON file."}]
\ No newline at end of file