
Commit 038f37e

refactored flow and dataset logic

SubhadityaMukherjee committed Jul 3, 2024
1 parent: b151a91
Showing 30 changed files with 2,464 additions and 1,124 deletions.
backend/data/.langchain.db: binary file modified (contents not shown)
backend/modules/general_utils.py: 3 changes (0 additions, 3 deletions)

@@ -19,9 +19,6 @@ def find_device(training: bool = False ) -> str:
     if torch.cuda.is_available():
         return "cuda"
     elif torch.backends.mps.is_available():
-        if training == False:
-            # loading metadata on mps for inference is quite slow. So disabling for now.
-            return "cpu"
         return "mps"
     else:
         return "cpu"
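
For readability, here is the full find_device helper as it stands after this commit, assembled from the context and removed lines above. This is a sketch: the torch import is assumed from the rest of the module, and the docstring is added for illustration.

import torch

def find_device(training: bool = False) -> str:
    """Pick the best available torch device string."""
    # The MPS-to-CPU fallback for inference was removed in this commit,
    # so the `training` flag no longer changes the outcome.
    if torch.cuda.is_available():
        return "cuda"
    elif torch.backends.mps.is_available():
        return "mps"
    else:
        return "cpu"

Note that training is now effectively unused; keeping the parameter preserves existing call sites.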
backend/modules/llm.py: 17 changes (3 additions, 14 deletions)

@@ -256,13 +256,14 @@ def setup_vector_db_and_qa(config: dict, data_type: str, client:ClientAPI) -> la
     """

     config["type_of_data"] = data_type
+
     # Download the data if it does not exist
-    openml_data_object, data_id, all_metadata = get_all_metadata_from_openml(
+    openml_data_object, data_id, all_metadata, handler = get_all_metadata_from_openml(
         config=config
     )
     # Create the combined metadata dataframe
     metadata_df, all_metadata = create_metadata_dataframe(
-        openml_data_object, data_id, all_metadata, config=config
+        handler, openml_data_object, data_id, all_metadata, config=config
     )
     # Create the vector store
     vectordb = load_document_and_create_vector_store(

@@ -297,15 +298,3 @@ def get_llm_chain(config: dict, local:bool =False) -> LLMChain|bool:

 def get_llm_result_from_string(llm_chain, string):
     return llm_chain.invoke({"docs": string})
     # return llm_chain.stream({"docs": string})
-
-# def get_llm_result(docs: Sequence[Document], config:dict):
-#     try:
-#         llm_chain = get_llm_chain(config=config, local=False)
-#         return llm_chain.invoke({"docs": docs})
-#     except Exception as e:
-#         # print(e)
-#         llm_chain = get_llm_chain(config=config, local=True)
-#         return llm_chain.invoke({"docs": docs})
-#     return "LLM model failed to generate a summary at the moment, please try again later."
-#     llm_chain = get_llm_chain(config=config, local=True)
-#     return llm_chain.invoke({"docs": docs})
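
The visible effect of the refactor is that get_all_metadata_from_openml now also returns a handler, which is then passed as the first argument to create_metadata_dataframe. A minimal sketch of the updated flow inside setup_vector_db_and_qa, assuming the names and call order exactly as in the hunk above; the elided arguments to load_document_and_create_vector_store, the truncated return type, and the handler's internals are not part of this diff and are left out.

def setup_vector_db_and_qa(config: dict, data_type: str, client):
    config["type_of_data"] = data_type

    # Download the data if it does not exist; the new `handler` carries
    # the dataset/flow-specific logic alongside the raw metadata.
    openml_data_object, data_id, all_metadata, handler = get_all_metadata_from_openml(
        config=config
    )
    # Create the combined metadata dataframe, delegating type-specific
    # formatting to the handler (now the first argument).
    metadata_df, all_metadata = create_metadata_dataframe(
        handler, openml_data_object, data_id, all_metadata, config=config
    )
    # Create the vector store (arguments elided in the hunk shown above).
    vectordb = load_document_and_create_vector_store(
        ...
    )

Passing the handler explicitly keeps the dataset-versus-flow branching in one object instead of re-deriving it from config at each step, which matches the commit message's "refactored flow and dataset logic".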