From 8193a22e6098f2382e15799fd0f3aafdd14e4d52 Mon Sep 17 00:00:00 2001
From: sumansaurabh
Date: Wed, 13 Nov 2024 01:56:36 +0000
Subject: [PATCH] feat: add requirements file and update API endpoint with
 error handling

---
 .gitignore       | 164 +++++++++++++++++++++++++++++++++++++++++++++++
 apiCall.py       |  25 +++++---
 dualEncoder.py   | 129 +++++++++++++++++++------------------
 main.py          |  30 +++++++--
 requirements.txt |   4 ++
 5 files changed, 275 insertions(+), 77 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 requirements.txt

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3cd2b8b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,164 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+**/*.pyc
+# C extensions
+*.so
+__pycache__/
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock

+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
\ No newline at end of file
diff --git a/apiCall.py b/apiCall.py
index 756a832..c884b87 100644
--- a/apiCall.py
+++ b/apiCall.py
@@ -10,7 +10,7 @@
 
 def generate_code_tree(file_path: str, content: str, modified_lines: List[int]) -> Dict[str, CodeTree]:
     """Generate a code tree for a file with modified lines."""
-    url = "http://localhost:8000/api/v1/hook/file/generate/codetree"
+    url = "https://production-gateway.snorkell.ai/api/v1/hook/file/generate/codetree"
     data = {
         "file_path": file_path,
         "content": content,
@@ -20,14 +20,19 @@ def generate_code_tree(file_path: str, content: str, modified_lines: List[int])
     headers = {
         'accept': 'application/json',
         'Content-Type': 'application/json',
-        'api-key': 'skl_ai_D7ZgnFMcAdKj7TcT'
+        'api-key': 'skl_ai_gQs0G76hUSiCK8Uk'
     }
 
-    response = requests.post(
-        url,
-        headers=headers,
-        json=data
-    )
-
-    response.raise_for_status()
-    return response.json()
+    try:
+        response = requests.post(
+            url,
+            headers=headers,
+            json=data
+        )
+
+        response.raise_for_status()
+        return response.json()
+    except Exception as e:
+        print(e)
+        print(f"Error generating code tree for file - {file_path}")
+        return {}
diff --git a/dualEncoder.py b/dualEncoder.py
index c0437c5..020c8eb 100644
--- a/dualEncoder.py
+++ b/dualEncoder.py
@@ -41,6 +41,7 @@ def __init__(
         # Auto-detect device if none specified
         if device is None:
             device = "cuda" if torch.cuda.is_available() else "cpu"
+        print(f"Using device: {device}")
 
         self.code_encoder = SentenceTransformer(code_model, device=device)
         self.doc_encoder = SentenceTransformer(doc_model, device=device)
@@ -48,41 +49,6 @@ def __init__(
         self.doc_weight = 1 - code_weight
         self.functions: List[CodeFunction] = []
 
-    def parse_python_file(self, file_path: str) -> List[CodeFunction]:
-        """Parse a Python file and extract functions with their documentation."""
-        with open(file_path, 'r') as file:
-            content = file.read()
-
-        tree = ast.parse(content)
-        functions = []
-
-        for node in ast.walk(tree):
-            if isinstance(node, ast.FunctionDef):
-                # Extract function code
-                code_lines = content.split('\n')[node.lineno-1:node.end_lineno]
-                code = '\n'.join(code_lines)
-
-                # Extract docstring and comments
-                docstring = ast.get_docstring(node) or ''
-
-                # Extract inline comments
-                comments = []
-                for child in ast.walk(node):
-                    if hasattr(child, 'lineno'):
-                        line = code_lines[child.lineno - node.lineno]
-                        if '#' in line:
-                            comments.append(line[line.index('#')+1:].strip())
-
-                all_documentation = docstring + '\n' + '\n'.join(comments)
-
-                functions.append(CodeFunction(
-                    name=node.name,
-                    code=code,
-                    documentation=all_documentation,
-                    file_path=file_path
-                ))
-
-        return functions
 
     def load_documentation(self, docs_path: str) -> Dict[str, str]:
         """Load external documentation from a directory."""
@@ -96,14 +62,6 @@ def load_documentation(self, docs_path: str) -> Dict[str, str]:
 
         return docs
 
-    def preprocess_code(self, code: str) -> str:
-        """Preprocess code for better embedding."""
-        # Remove comments
-
-        print(code)
-        tree = ast.parse(code)
-        return ast.unparse(tree)
-
     def encode_batch(
         self,
         texts: List[str],
@@ -118,17 +76,29 @@
             convert_to_numpy=True
         )
 
+
     def index_repository(self, repo_path: str, docs_path: str, force_update: bool = False):
         """Index all Python files using both encoders."""
-        python_files = glob.glob(os.path.join(repo_path, "**/*.py"), recursive=True)
+
+        # external_docs = self.load_documentation(docs_path)
         index_path = f"{repo_path}/function_index.json"
         if not force_update:
             # Check if index already exists
+            print("checking if index exists")
             if os.path.exists(index_path):
+                print("index exists")
                 self.load_index(index_path)
                 return
+
+        python_files = glob.glob(os.path.join(repo_path, "**/*"), recursive=True)
+        print("total files - ", len(python_files))
+
+        # Keep only source files with extensions ["py","ts","cs","c","js", "kt"]
+        python_files = [file for file in python_files if file.split(".")[-1] in ["py","ts","cs","c","js", "kt"]]
+
+        print("selected files - ", len(python_files))
 
         # Collect all texts to encode
         codes_to_encode = []
@@ -136,8 +106,15 @@ def index_repository(self, repo_path: str, docs_path: str, force_update: bool =
         temp_functions: List[CodeFunction] = []
 
         count = 0
+        print("collecting functions to encode")
         for file_path in python_files:
+            extension = file_path.split(".")[-1]
+            if extension not in ["py","ts","cs","c","js", "kt"]:
+                print(f"skipping extension - {extension}")
+                continue
+
+            # count += 1
             # if count > 2:
             #     break
@@ -151,30 +128,56 @@
                 code_str = file.read()
 
             code_tree = generate_code_tree(file_path, code_str, [])
-            code_tree: CodeTree = CodeTree(**code_tree)
-            pprint(code_tree)
+            try:
+                code_tree: CodeTree = CodeTree(**code_tree)
+            except Exception as e:
+                print("Error parsing code tree")
+                continue
+            # pprint(code_tree)
+
+            if code_tree.methods is not None:
+                for func, func_dict in code_tree.methods.items():
+                    # Prepare code for embedding
+                    processed_code = func_dict.content
+                    codes_to_encode.append(processed_code)
+                    func_name = func.split("~")[0]
+
+                    # Combine all documentation
+                    combined_doc = f"MethodName: {func_name} \n{func_dict.docstring}"
+
+                    docs_to_encode.append(combined_doc)
+                    temp_functions.append(CodeFunction(
+                        name=func_name,
+                        code=func_dict.content,
+                        documentation=func_dict.docstring,
+                        file_path=file_path
+                    ))
+            if code_tree.classes is not None:
+                for class_details, class_dict in code_tree.classes.items():
+                    for func, func_dict in class_dict.methods.items():
+                        # Prepare code for embedding
+                        processed_code = func_dict.content
+                        codes_to_encode.append(processed_code)
+                        func_name = func.split("~")[0]
+
+                        # Combine all documentation
+                        combined_doc = f"MethodName: {func_name} \n{func_dict.docstring}"
+
+                        docs_to_encode.append(combined_doc)
+                        temp_functions.append(CodeFunction(
+                            name=func_name,
+                            code=func_dict.content,
+                            documentation=func_dict.docstring,
+                            file_path=file_path
+                        ))
+            else:
+                print("No class found in file - ", file_path)
 
-
-            for func, func_dict in code_tree.methods.items():
-                # Prepare code for embedding
-                processed_code = func_dict.content
-                codes_to_encode.append(processed_code)
-                func_name = func.split("~")[0]
-
-                # Combine all documentation
-            combined_doc = f"MethodName: {func_name} \n{func_dict.docstring}"
-
-            docs_to_encode.append(combined_doc)
-            temp_functions.append(CodeFunction(
-                name=func_name,
-                code=func_dict.content,
-                documentation=func_dict.docstring,
-                file_path=file_path
-            ))
 
         # Batch encode everything
         code_embeddings = self.encode_batch(codes_to_encode, self.code_encoder)
         doc_embeddings = self.encode_batch(docs_to_encode, self.doc_encoder)
+        print("finished encoding embeddings")
 
         # Assign embeddings to functions
         for func, code_emb, doc_emb in zip(temp_functions, code_embeddings, doc_embeddings):
diff --git a/main.py b/main.py
index ee3f1d9..1b0fcf8 100644
--- a/main.py
+++ b/main.py
@@ -9,6 +9,7 @@
 # Index your repository
 repo = "/Users/sumansaurabh/Documents/singularityx/github/MoneyPrinterTurbo/app/"
 repo = "/Users/sumansaurabh/Documents/singularityx/github/snorkell-backend/backend/"
+repo = "/home/azureuser/localfiles/pokerogue"
 
 encoder.index_repository(
     repo_path=repo,
@@ -17,22 +18,24 @@
 
 # Search with different focuses
 # 1. Search everything
+
+message = "Increment the turn in the battle scene"
 results = encoder.search(
-    "handle user authentication",
+    message,
     search_code=True,
     search_docs=True
 )
 
 # 2. Search only code semantics
 code_results = encoder.search(
-    "implement binary search",
+    message,
     search_code=True,
     search_docs=False
 )
 
 # 3. Search only documentation
 doc_results = encoder.search(
-    "user authentication flow",
+    message,
     search_code=False,
     search_docs=True
 )
@@ -40,6 +43,25 @@
 # Process results
 for result in results:
     print(f"Function: {result.function.name}")
+    print(f"File Path: {result.function.file_path}")
+    print(f"Docstring: {result.function.documentation}")
     print(f"Code Similarity: {result.code_similarity:.2f}")
     print(f"Doc Similarity: {result.doc_similarity:.2f}")
-    print(f"Combined: {result.combined_similarity:.2f}")
\ No newline at end of file
+    print(f"Combined: {result.combined_similarity:.2f}")
+    print("\n\n")
+
+
+
+# # code results
+# for result in code_results:
+#     print(f"code: Function: {result.function.name}")
+#     print(f"code: Code Similarity: {result.code_similarity:.2f}")
+#     print(f"code: Doc Similarity: {result.doc_similarity:.2f}")
+#     print(f"code: Combined: {result.combined_similarity:.2f}")
+
+# # doc results
+# for result in doc_results:
+#     print(f"doc: Function: {result.function.name}")
+#     print(f"doc: Code Similarity: {result.code_similarity:.2f}")
+#     print(f"doc: Doc Similarity: {result.doc_similarity:.2f}")
+#     print(f"doc: Combined: {result.combined_similarity:.2f}")
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..12f9479
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+numpy
+sentence_transformers
+torch
+pydantic
\ No newline at end of file
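
Note on the api-key header: the patch rotates the key but still commits it to apiCall.py, and requirements.txt does not list requests even though apiCall.py imports it. Below is a minimal sketch of the same call with the key read from the environment instead of hardcoded. SNORKELL_API_KEY is a hypothetical variable name, and catching requests.RequestException rather than the bare Exception is an assumption of this sketch, not what the patch ships.

# Sketch only: same request shape and empty-dict fallback as the patched
# generate_code_tree, with the secret taken from the environment.
import os
from typing import Dict, List

import requests


def generate_code_tree(file_path: str, content: str, modified_lines: List[int]) -> Dict:
    url = "https://production-gateway.snorkell.ai/api/v1/hook/file/generate/codetree"
    headers = {
        "accept": "application/json",
        "Content-Type": "application/json",
        "api-key": os.environ["SNORKELL_API_KEY"],  # hypothetical env var, avoids a committed secret
    }
    data = {"file_path": file_path, "content": content, "modified_lines": modified_lines}
    try:
        response = requests.post(url, headers=headers, json=data)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        # Narrower than the patch's bare Exception; failure behaviour is the same.
        print(f"Error generating code tree for file - {file_path}: {e}")
        return {}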
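
On the scoring side, DualEncoder.__init__ stores code_weight and doc_weight = 1 - code_weight, and main.py prints code_similarity, doc_similarity, and combined_similarity, but search() itself is not touched by this patch. The sketch below shows the weighted combination those fields imply, under the assumption that both similarities are cosine scores over the two embedding spaces; the actual scoring code is not shown in the diff, so treat this as illustrative only.

# Sketch of the weighted score implied by code_weight / doc_weight and the
# similarity fields printed in main.py; search() is not part of this patch.
import numpy as np


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Cosine similarity between two 1-D embedding vectors.
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))


def combined_similarity(
    query_code_emb: np.ndarray,
    query_doc_emb: np.ndarray,
    func_code_emb: np.ndarray,
    func_doc_emb: np.ndarray,
    code_weight: float = 0.5,
) -> float:
    code_sim = cosine_similarity(query_code_emb, func_code_emb)
    doc_sim = cosine_similarity(query_doc_emb, func_doc_emb)
    # Mirrors DualEncoder.__init__: doc_weight = 1 - code_weight
    return code_weight * code_sim + (1 - code_weight) * doc_sim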