diff --git a/cli2.py b/cli2.py index 216f95e02..5a0de93e4 100644 --- a/cli2.py +++ b/cli2.py @@ -10,7 +10,7 @@ def check_command(command, message): if not shutil.which(command): - logger.info(message) + #logger.info(message) sys.exit(1) @@ -37,11 +37,11 @@ def run_server(shell=False,a_name=None,a_description=None,goals=None): def cleanup(api_process, ui_process, celery_process): - logger.info("Shutting down processes...") + #logger.info("Shutting down processes...") api_process.terminate() ui_process.terminate() celery_process.terminate() - logger.info("Processes terminated. Exiting.") + #logger.info("Processes terminated. Exiting.") sys.exit(1) diff --git a/main.py b/main.py index cdfb83486..dcae0a113 100644 --- a/main.py +++ b/main.py @@ -207,21 +207,21 @@ def replace_old_iteration_workflows(session): @app.on_event("startup") async def startup_event(): # Perform startup tasks here - logger.info("Running Startup tasks") + #logger.info("Running Startup tasks") Session = sessionmaker(bind=engine) session = Session() default_user = session.query(User).filter(User.email == "super6@agi.com").first() - logger.info(default_user) + #logger.info(default_user) if default_user is not None: organisation = session.query(Organisation).filter_by(id=default_user.organisation_id).first() - logger.info(organisation) + #logger.info(organisation) register_toolkits(session, organisation) def register_toolkit_for_all_organisation(): organizations = session.query(Organisation).all() for organization in organizations: register_toolkits(session, organization) - logger.info("Successfully registered local toolkits for all Organisations!") + #logger.info("Successfully registered local toolkits for all Organisations!") def register_toolkit_for_master_organisation(): marketplace_organisation_id = superagi.config.config.get_config("MARKETPLACE_ORGANISATION_ID") diff --git a/run_gui.py b/run_gui.py index 1e2e0e7ff..595207300 100644 --- a/run_gui.py +++ b/run_gui.py @@ -7,7 +7,7 @@ def 
check_command(command, message): if not shutil.which(command): - logger.info(message) + #logger.info(message) sys.exit(1) def run_npm_commands(): @@ -27,10 +27,10 @@ def run_server(): return api_process, ui_process def cleanup(api_process, ui_process): - logger.info("Shutting down processes...") + #logger.info("Shutting down processes...") api_process.terminate() ui_process.terminate() - logger.info("Processes terminated. Exiting.") + #logger.info("Processes terminated. Exiting.") sys.exit(1) if __name__ == "__main__": diff --git a/superagi/agent/agent_iteration_step_handler.py b/superagi/agent/agent_iteration_step_handler.py index 0e2c4fcec..4d070e8a5 100644 --- a/superagi/agent/agent_iteration_step_handler.py +++ b/superagi/agent/agent_iteration_step_handler.py @@ -70,7 +70,7 @@ def execute_step(self): .build_agent_messages(prompt, agent_feeds, history_enabled=iteration_workflow_step.history_enabled, completion_prompt=iteration_workflow_step.completion_prompt) - logger.debug("Prompt messages:", messages) + # logger.debug("Prompt messages:", messages) current_tokens = TokenCounter.count_message_tokens(messages = messages, model = self.llm.get_model()) response = self.llm.chat_completion(messages, TokenCounter(session=self.session, organisation_id=organisation.id).token_limit(self.llm.get_model()) - current_tokens) @@ -116,7 +116,7 @@ def execute_step(self): else: # moving to next step of iteration or workflow self._update_agent_execution_next_step(execution, iteration_workflow_step.next_step_id) - logger.info(f"Starting next job for agent execution id: {self.agent_execution_id}") + #logger.info(f"Starting next job for agent execution id: {self.agent_execution_id}") self.session.flush() diff --git a/superagi/agent/agent_prompt_builder.py b/superagi/agent/agent_prompt_builder.py index e31c745d1..259018223 100644 --- a/superagi/agent/agent_prompt_builder.py +++ b/superagi/agent/agent_prompt_builder.py @@ -87,7 +87,7 @@ def replace_main_variables(cls, 
super_agi_prompt: str, goals: List[str], instruc AgentPromptBuilder.add_list_items_to_string(constraints)) - # logger.info(tools) + # #logger.info(tools) tools_string = AgentPromptBuilder.add_tools_to_prompt(tools, add_finish_tool) super_agi_prompt = super_agi_prompt.replace("{tools}", tools_string) return super_agi_prompt diff --git a/superagi/agent/agent_tool_step_handler.py b/superagi/agent/agent_tool_step_handler.py index d1b045f54..977862af9 100644 --- a/superagi/agent/agent_tool_step_handler.py +++ b/superagi/agent/agent_tool_step_handler.py @@ -98,7 +98,7 @@ def _handle_next_step(self, next_step): def _process_input_instruction(self, agent_config, agent_execution_config, step_tool, workflow_step): tool_obj = self._build_tool_obj(agent_config, agent_execution_config, step_tool.tool_name) prompt = self._build_tool_input_prompt(step_tool, tool_obj, agent_execution_config) - logger.info("Prompt: ", prompt) + #logger.info("Prompt: ", prompt) agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(self.session, self.agent_execution_id) messages = AgentLlmMessageBuilder(self.session, self.llm, self.llm.get_model(), self.agent_id, self.agent_execution_id) \ .build_agent_messages(prompt, agent_feeds, history_enabled=step_tool.history_enabled, diff --git a/superagi/agent/agent_workflow_step_wait_handler.py b/superagi/agent/agent_workflow_step_wait_handler.py index 223859d31..96076afd3 100644 --- a/superagi/agent/agent_workflow_step_wait_handler.py +++ b/superagi/agent/agent_workflow_step_wait_handler.py @@ -18,7 +18,7 @@ def __init__(self, session, agent_id, agent_execution_id): def execute_step(self): """Execute the agent wait step.""" - logger.info("Executing Wait Step") + #logger.info("Executing Wait Step") execution = AgentExecution.get_agent_execution_from_id(self.session, self.agent_execution_id) workflow_step = AgentWorkflowStep.find_by_id(self.session, execution.current_agent_step_id) step_wait = AgentWorkflowStepWait.find_by_id(self.session, 
workflow_step.action_reference_id) diff --git a/superagi/agent/output_handler.py b/superagi/agent/output_handler.py index 1fdeb7531..5a5f5b8eb 100644 --- a/superagi/agent/output_handler.py +++ b/superagi/agent/output_handler.py @@ -151,7 +151,7 @@ def handle(self, session, assistant_reply): for task in reversed(tasks): self.task_queue.add_task(task) if len(tasks) > 0: - logger.info("Adding task to queue: " + str(tasks)) + #logger.info("Adding task to queue: " + str(tasks)) agent_execution = AgentExecution.find_by_id(session, self.agent_execution_id) for task in tasks: agent_execution_feed = AgentExecutionFeed(agent_execution_id=self.agent_execution_id, @@ -182,7 +182,7 @@ def handle(self, session, assistant_reply): for task in reversed(tasks): self.task_queue.add_task(task) if len(tasks) > 0: - logger.info("Tasks reprioritized in order: " + str(tasks)) + #logger.info("Tasks reprioritized in order: " + str(tasks)) status = "COMPLETE" if len(self.task_queue.get_tasks()) == 0 else "PENDING" session.commit() return TaskExecutorResponse(status=status, retry=False) diff --git a/superagi/agent/output_parser.py b/superagi/agent/output_parser.py index 21257abec..7b4178575 100644 --- a/superagi/agent/output_parser.py +++ b/superagi/agent/output_parser.py @@ -35,7 +35,7 @@ def parse(self, response: str) -> AgentGPTAction: # OpenAI returns `str(content_dict)`, literal_eval reverses this try: - logger.debug("AgentSchemaOutputParser: ", response) + #logger.debug("AgentSchemaOutputParser: ", response) response_obj = ast.literal_eval(response) args = response_obj['tool']['args'] if 'args' in response_obj['tool'] else {} return AgentGPTAction( @@ -43,7 +43,7 @@ def parse(self, response: str) -> AgentGPTAction: args=args, ) except BaseException as e: - logger.info(f"AgentSchemaOutputParser: Error parsing JSON response {e}") + #logger.info(f"AgentSchemaOutputParser: Error parsing JSON response {e}") raise e @@ -58,7 +58,7 @@ def parse(self, response: str) -> AgentGPTAction: # OpenAI 
returns `str(content_dict)`, literal_eval reverses this try: - logger.debug("AgentSchemaOutputParser: ", response) + #logger.debug("AgentSchemaOutputParser: ", response) response_obj = ast.literal_eval(response) args = response_obj['args'] if 'args' in response_obj else {} return AgentGPTAction( @@ -66,5 +66,5 @@ def parse(self, response: str) -> AgentGPTAction: args=args, ) except BaseException as e: - logger.info(f"AgentSchemaToolOutputParser: Error parsing JSON response {e}") + #logger.info(f"AgentSchemaToolOutputParser: Error parsing JSON response {e}") raise e diff --git a/superagi/agent/queue_step_handler.py b/superagi/agent/queue_step_handler.py index 102d01e44..9df4667ce 100644 --- a/superagi/agent/queue_step_handler.py +++ b/superagi/agent/queue_step_handler.py @@ -79,11 +79,11 @@ def _process_reply(self, task_queue: TaskQueue, assistant_reply: str): task_array = np.array(eval(assistant_reply)).flatten().tolist() for task in task_array: task_queue.add_task(str(task)) - logger.info("RAMRAM: Added task to queue: ", task) + #logger.info("RAMRAM: Added task to queue: ", task) def _process_input_instruction(self, step_tool): prompt = self._build_queue_input_prompt(step_tool) - logger.info("Prompt: ", prompt) + #logger.info("Prompt: ", prompt) agent_feeds = AgentExecutionFeed.fetch_agent_execution_feeds(self.session, self.agent_execution_id) #print(".........//////////////..........2") messages = AgentLlmMessageBuilder(self.session, self.llm, self.llm.get_model(), self.agent_id, self.agent_execution_id) \ diff --git a/superagi/agent/tool_executor.py b/superagi/agent/tool_executor.py index 303b8364a..18c1485bb 100644 --- a/superagi/agent/tool_executor.py +++ b/superagi/agent/tool_executor.py @@ -26,7 +26,7 @@ def execute(self, session, tool_name, tool_args): tools = {t.name.lower().replace(" ", ""): t for t in self.tools} tool_name = tool_name.lower().replace(" ", "") if tool_name == ToolExecutor.FINISH or tool_name == "": - logger.info("\nTask Finished :) \n") + 
#logger.info("\nTask Finished :) \n") return ToolExecutorResponse(status="COMPLETE", result="") if tool_name in tools.keys(): status = "SUCCESS" @@ -61,7 +61,7 @@ def execute(self, session, tool_name, tool_args): ) output = ToolExecutorResponse(status="ERROR", result=result, retry=True) - logger.info("Tool Response : " + str(output) + "\n") + #logger.info("Tool Response : " + str(output) + "\n") return output def clean_tool_args(self, args): diff --git a/superagi/config/config.py b/superagi/config/config.py index e2ed0c9c8..8ae1a3a54 100644 --- a/superagi/config/config.py +++ b/superagi/config/config.py @@ -42,7 +42,7 @@ def load_config(cls, config_file: str) -> dict: # Merge environment variables and config data env_vars = dict(os.environ) - logger.info(env_vars) + #logger.info(env_vars) config_data = {**config_data, **env_vars} return config_data diff --git a/superagi/controllers/config.py b/superagi/controllers/config.py index 7f4836829..3bc990956 100644 --- a/superagi/controllers/config.py +++ b/superagi/controllers/config.py @@ -77,10 +77,10 @@ def create_config(config: ConfigurationIn, organisation_id: int, db.session.flush() return existing_config - logger.info("NEW CONFIG") + #logger.info("NEW CONFIG") new_config = Configuration(organisation_id=organisation_id, key=config.key, value=config.value) - logger.info(new_config) - logger.info("ORGANISATION ID : ", organisation_id) + #logger.info(new_config) + #logger.info("ORGANISATION ID : ", organisation_id) db.session.add(new_config) db.session.commit() db.session.flush() diff --git a/superagi/controllers/models_controller.py b/superagi/controllers/models_controller.py index 38fadf7e8..e45c18cd0 100644 --- a/superagi/controllers/models_controller.py +++ b/superagi/controllers/models_controller.py @@ -74,7 +74,7 @@ async def verify_end_point(model_api_key: str = None, end_point: str = None, mod async def store_model(request: StoreModelRequest, organisation=Depends(get_user_organisation)): try: #context_length = 
4096 - logger.info(request) + #logger.info(request) if 'context_length' in request.dict(): return Models.store_model_details(db.session, organisation.id, request.model_name, request.description, request.end_point, request.model_provider_id, request.token_limit, request.type, request.version, request.context_length) else: @@ -205,9 +205,9 @@ def test_local_llm(): ] response = llm_model.create_chat_completion(messages=messages, grammar=llm_grammar) content = response["choices"][0]["message"]["content"] - logger.info(content) + #logger.info(content) return "Model loaded successfully." except Exception as e: - logger.info("Error: ",e) + #logger.info("Error: ",e) raise HTTPException(status_code=404, detail="Error while loading the model. Please check your model path and try again.") \ No newline at end of file diff --git a/superagi/controllers/organisation.py b/superagi/controllers/organisation.py index e366c5966..f7f845856 100644 --- a/superagi/controllers/organisation.py +++ b/superagi/controllers/organisation.py @@ -70,7 +70,7 @@ def create_organisation(organisation: OrganisationIn, db.session.commit() db.session.flush() register_toolkits(session=db.session, organisation=new_organisation) - logger.info(new_organisation) + #logger.info(new_organisation) return new_organisation diff --git a/superagi/controllers/project.py b/superagi/controllers/project.py index aed73b5f2..cc540ac1a 100644 --- a/superagi/controllers/project.py +++ b/superagi/controllers/project.py @@ -49,7 +49,7 @@ def create_project(project: ProjectIn, """ - logger.info("Organisation_id : ", project.organisation_id) + #logger.info("Organisation_id : ", project.organisation_id) organisation = db.session.query(Organisation).get(project.organisation_id) if not organisation: diff --git a/superagi/controllers/resources.py b/superagi/controllers/resources.py index ab83275e0..b7b5e458e 100644 --- a/superagi/controllers/resources.py +++ b/superagi/controllers/resources.py @@ -80,7 +80,7 @@ async def 
upload(agent_id: int, file: UploadFile = File(...), name=Form(...), si file_path = 'resources' + file_path try: s3.upload_fileobj(file.file, bucket_name, file_path) - logger.info("File uploaded successfully!") + #logger.info("File uploaded successfully!") except NoCredentialsError: raise HTTPException(status_code=500, detail="AWS credentials not found. Check your configuration.") @@ -92,7 +92,7 @@ async def upload(agent_id: int, file: UploadFile = File(...), name=Form(...), si db.session.flush() summarize_resource.delay(agent_id, resource.id) - logger.info(resource) + #logger.info(resource) return resource diff --git a/superagi/controllers/user.py b/superagi/controllers/user.py index f4dce4b4f..224ffee7f 100644 --- a/superagi/controllers/user.py +++ b/superagi/controllers/user.py @@ -74,7 +74,7 @@ def create_user(user: UserIn, db.session.flush() organisation = Organisation.find_or_create_organisation(db.session, db_user) Project.find_or_create_default_project(db.session, organisation.id) - logger.info("User created", db_user) + #logger.info("User created", db_user) #adding local llm configuration ModelsConfig.add_llm_config(db.session, organisation.id) @@ -146,5 +146,5 @@ def update_first_login_source(source: str, Authorize: AuthJWT = Depends(check_au user.first_login_source = source db.session.commit() db.session.flush() - logger.info("User : ",user) + #logger.info("User : ",user) return user diff --git a/superagi/helper/github_helper.py b/superagi/helper/github_helper.py index bb34eaf56..6536801f6 100644 --- a/superagi/helper/github_helper.py +++ b/superagi/helper/github_helper.py @@ -63,7 +63,7 @@ def check_repository_visibility(self, repository_owner, repository_name): repository_data = response.json() return repository_data['private'] else: - logger.info(f"Failed to fetch repository information: {response.status_code} - {response.text}") + #logger.info(f"Failed to fetch repository information: {response.status_code} - {response.text}") return None def 
search_repo(self, repository_owner, repository_name, file_name, folder_path=None): @@ -116,10 +116,10 @@ def sync_branch(self, repository_owner, repository_name, base_branch, head_branc } response = requests.patch(head_branch_url, json=data, headers=headers) if response.status_code == 200: - logger.info( + if False: logger.info( f'Successfully synced {self.github_username}:{head_branch} branch with {repository_owner}:{base_branch}') else: - logger.info('Failed to sync the branch. Check your inputs and permissions.') + pass  # logger.info('Failed to sync the branch. Check your inputs and permissions.') def make_fork(self, repository_owner, repository_name, base_branch, headers): """ @@ -137,10 +137,10 @@ def make_fork(self, repository_owner, repository_name, base_branch, headers): fork_url = f'https://api.github.com/repos/{repository_owner}/{repository_name}/forks' fork_response = requests.post(fork_url, headers=headers) if fork_response.status_code == 202: - logger.info('Fork created successfully.') + #logger.info('Fork created successfully.') self.sync_branch(repository_owner, repository_name, base_branch, base_branch, headers) else: - logger.info('Failed to create the fork:', fork_response.json()['message']) + pass  # logger.info('Failed to create the fork:', fork_response.json()['message']) return fork_response.status_code @@ -166,11 +166,11 @@ def create_branch(self, repository_name, base_branch, head_branch, headers): } branch_response = requests.post(branch_url, json=branch_params, headers=headers) if branch_response.status_code == 201: - logger.info('Branch created successfully.') + pass  # logger.info('Branch created successfully.') elif branch_response.status_code == 422: - logger.info('Branch new-file already exists, making commits to new-file branch') + pass  # logger.info('Branch new-file already exists, making commits to new-file branch') else: - logger.info('Failed to create branch:', branch_response.json()['message']) + pass  # logger.info('Failed to create branch:',
branch_response.json()['message']) return branch_response.status_code @@ -198,9 +198,9 @@ def delete_file(self, repository_name, file_name, folder_path, commit_message, h } file_response = requests.delete(file_url, json=file_params, headers=headers) if file_response.status_code == 200: - logger.info('File or folder delete successfully.') + pass  # logger.info('File or folder delete successfully.') else: - logger.info('Failed to Delete file or folder:', file_response.json()) + pass  # logger.info('Failed to Delete file or folder:', file_response.json()) return file_response.status_code @@ -232,11 +232,11 @@ def add_file(self, repository_owner, repository_name, file_name, folder_path, he } file_response = requests.put(file_url, json=file_params, headers=headers) if file_response.status_code == 201: - logger.info('File content uploaded successfully.') + pass  # logger.info('File content uploaded successfully.') elif file_response.status_code == 422: - logger.info('File already exists') + pass  # logger.info('File already exists') else: - logger.info('Failed to upload file content:', file_response.json()['message']) + pass  # logger.info('Failed to upload file content:', file_response.json()['message']) return file_response.status_code @@ -265,11 +265,11 @@ def create_pull_request(self, repository_owner, repository_name, head_branch, ba pr_response = requests.post(pull_request_url, json=pull_request_params, headers=headers) if pr_response.status_code == 201: - logger.info('Pull request created successfully.') + pass  # logger.info('Pull request created successfully.') elif pr_response.status_code == 422: - logger.info('Added changes to already existing pull request') + pass  # logger.info('Added changes to already existing pull request') else: - logger.info('Failed to create pull request:', pr_response.json()['message']) + pass  # logger.info('Failed to create pull request:', pr_response.json()['message']) return pr_response.status_code @@ -362,7 +362,7 @@ def get_pull_request_content(self, repository_owner,
repository_name, pull_reque response = requests.get(pull_request_url, headers=headers) if response.status_code == 200: - logger.info('Successfully fetched pull request content.') + #logger.info('Successfully fetched pull request content.') return response.text elif response.status_code == 404: logger.warning('Pull request not found.') @@ -426,7 +426,7 @@ def add_line_comment_to_pull_request(self, repository_owner, repository_name, pu } response = requests.post(comments_url, headers=headers, json=data) if response.status_code == 201: - logger.info('Successfully added line comment to pull request.') + #logger.info('Successfully added line comment to pull request.') return response.json() else: logger.warning(f'Failed to add line comment: {response.json()["message"]}') diff --git a/superagi/helper/google_search.py b/superagi/helper/google_search.py index d8fd79390..6501d7caf 100644 --- a/superagi/helper/google_search.py +++ b/superagi/helper/google_search.py @@ -58,7 +58,7 @@ def search_run(self, query): all_snippets.append(item["snippet"]) links.append(item["link"]) else: - logger.info("No items found in the response.") + pass  # logger.info("No items found in the response.") except ValueError as e: logger.error(f"Error while parsing JSON data: {e}") else: @@ -82,7 +82,7 @@ def get_result(self, query): attempts = 0 while snippets == [] and attempts < 2: attempts += 1 - logger.info("Google blocked the request. Trying again...") + #logger.info("Google blocked the request. 
Trying again...") time.sleep(3) snippets, links, error_code = self.search_run(query) diff --git a/superagi/helper/resource_helper.py b/superagi/helper/resource_helper.py index 961cbd998..0fd11a1c8 100644 --- a/superagi/helper/resource_helper.py +++ b/superagi/helper/resource_helper.py @@ -45,7 +45,7 @@ def make_written_file_resource(cls, file_name: str, agent: Agent, agent_executio file_path = ResourceHelper.get_agent_write_resource_path(file_name, agent, agent_execution) - logger.info("make_written_file_resource:", final_path) + #logger.info("make_written_file_resource:", final_path) if StorageType.get_storage_type(get_config("STORAGE_TYPE", StorageType.FILE.value)) == StorageType.S3: file_path = "resources" + file_path existing_resource = session.query(Resource).filter_by( diff --git a/superagi/helper/s3_helper.py b/superagi/helper/s3_helper.py index 31aad6b43..72284c8b4 100644 --- a/superagi/helper/s3_helper.py +++ b/superagi/helper/s3_helper.py @@ -48,7 +48,7 @@ def upload_file(self, file, path): """ try: self.s3.upload_fileobj(file, self.bucket_name, path) - logger.info("File uploaded to S3 successfully!") + #logger.info("File uploaded to S3 successfully!") except Exception: raise HTTPException(status_code=500, detail="AWS credentials not found. 
Check your configuration.") @@ -58,7 +58,7 @@ def check_file_exists_in_s3(self, file_path): def read_from_s3(self, file_path): file_path = "resources" + file_path - logger.info(f"Reading file from s3: {file_path}") + #logger.info(f"Reading file from s3: {file_path}") response = self.s3.get_object(Bucket=get_config("BUCKET_NAME"), Key=file_path) if response['ResponseMetadata']['HTTPStatusCode'] == 200: return response['Body'].read().decode('utf-8') @@ -67,7 +67,7 @@ def read_from_s3(self, file_path): def read_binary_from_s3(self, file_path): file_path = "resources" + file_path #print("____________________________________________LOG TEST: FINAL PATH_____________", file_path) - logger.info(f"Reading file from s3: {file_path}") + #logger.info(f"Reading file from s3: {file_path}") response = self.s3.get_object(Bucket=get_config("BUCKET_NAME"), Key=file_path) if response['ResponseMetadata']['HTTPStatusCode'] == 200: return response['Body'].read() @@ -106,7 +106,7 @@ def delete_file(self, path): try: path = "resources" + path self.s3.delete_object(Bucket=self.bucket_name, Key=path) - logger.info("File deleted from S3 successfully!") + #logger.info("File deleted from S3 successfully!") except: raise HTTPException(status_code=500, detail="AWS credentials not found. 
Check your configuration.") @@ -142,10 +142,10 @@ def get_download_url_of_resources(self, db_resources_arr): def list_files_from_s3(self, file_path): try: file_path = "resources" + file_path - logger.info(f"Listing files from s3 with prefix: {file_path}") + #logger.info(f"Listing files from s3 with prefix: {file_path}") response = self.s3.list_objects_v2(Bucket=get_config("BUCKET_NAME"), Prefix=file_path) if 'Contents' in response: - logger.info(response['Contents']) + #logger.info(response['Contents']) file_list = [obj['Key'] for obj in response['Contents']] return file_list else: diff --git a/superagi/helper/tool_helper.py b/superagi/helper/tool_helper.py index be1db8799..740151a55 100644 --- a/superagi/helper/tool_helper.py +++ b/superagi/helper/tool_helper.py @@ -37,7 +37,7 @@ def download_tool(tool_url, target_folder): with open(tool_zip_file_path, 'wb') as f: f.write(response.content) - logger.info("Reading Zip") + #logger.info("Reading Zip") with zipfile.ZipFile(tool_zip_file_path, 'r') as z: members = [m for m in z.namelist() if m.startswith(f"{owner}-{repo}") and f"{path}" in m] @@ -64,7 +64,7 @@ def download_tool(tool_url, target_folder): else: with open(target_path, 'wb') as outfile, z.open(member) as infile: outfile.write(infile.read()) - logger.info("Donwload Success!") + #logger.info("Donwload Success!") os.remove(tool_zip_file_path) @@ -262,13 +262,13 @@ def register_toolkits(session, organisation): # tool_paths.append("superagi/tools/marketplace_tools") if organisation is not None: process_files(tool_paths, session, organisation) - logger.info(f"Toolkits Registered Successfully for Organisation ID : {organisation.id}!") + #logger.info(f"Toolkits Registered Successfully for Organisation ID : {organisation.id}!") def register_marketplace_toolkits(session, organisation): tool_paths = ["superagi/tools", "superagi/tools/external_tools","superagi/tools/marketplace_tools"] if organisation is not None: process_files(tool_paths, session, organisation) - 
logger.info(f"Marketplace Toolkits Registered Successfully for Organisation ID : {organisation.id}!") + #logger.info(f"Marketplace Toolkits Registered Successfully for Organisation ID : {organisation.id}!") def extract_repo_name(repo_link): # Extract the repository name from the link diff --git a/superagi/helper/validate_csv.py b/superagi/helper/validate_csv.py index 882679ccf..00ac772ed 100644 --- a/superagi/helper/validate_csv.py +++ b/superagi/helper/validate_csv.py @@ -21,6 +21,6 @@ def correct_csv_encoding(file_path): df = pd.DataFrame(data) df.to_csv(file_path, encoding='utf-8', index=False) - logger.info("File is converted to utf-8 encoding.") + #logger.info("File is converted to utf-8 encoding.") else: - logger.info("File is already in utf-8 encoding.") \ No newline at end of file + pass  # logger.info("File is already in utf-8 encoding.") \ No newline at end of file diff --git a/superagi/jobs/agent_executor.py b/superagi/jobs/agent_executor.py index 2e8a24afe..3b384b5df 100644 --- a/superagi/jobs/agent_executor.py +++ b/superagi/jobs/agent_executor.py @@ -66,7 +66,7 @@ def execute_next_step(self, agent_execution_id): model_api_key = model_config['api_key'] model_llm_source = model_config['provider'] except Exception as e: - logger.info(f"Unable to get model config...{e}") + #logger.info(f"Unable to get model config...{e}") return try: @@ -77,7 +77,7 @@ def execute_next_step(self, agent_execution_id): AgentExecutor.get_embedding(model_llm_source, model_api_key)) except Exception as e: - logger.info(f"Unable to setup the connection...{e}") + #logger.info(f"Unable to setup the connection...{e}") memory = None agent_workflow_step = session.query(AgentWorkflowStep).filter( @@ -87,13 +87,13 @@ def execute_next_step(self, agent_execution_id): model_api_key, organisation, session) except Exception as e: - logger.info("Exception in executing the step: {}".format(e)) + #logger.info("Exception in executing the step: {}".format(e)) 
superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=15) return agent_execution = session.query(AgentExecution).filter(AgentExecution.id == agent_execution_id).first() if agent_execution.status == "COMPLETED" or agent_execution.status == "WAITING_FOR_PERMISSION": - logger.info("Agent Execution is completed or waiting for permission") + #logger.info("Agent Execution is completed or waiting for permission") session.close() return superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=2) @@ -104,7 +104,7 @@ def execute_next_step(self, agent_execution_id): def __execute_workflow_step(self, agent, agent_config, agent_execution_id, agent_workflow_step, memory, model_api_key, organisation, session): - logger.info("Executing Workflow step : ", agent_workflow_step.action_type) + #logger.info("Executing Workflow step : ", agent_workflow_step.action_type) if agent_workflow_step.action_type == AgentWorkflowStepAction.TOOL.value: tool_step_handler = AgentToolStepHandler(session, llm=get_model(model=agent_config["model"], api_key=model_api_key, @@ -152,7 +152,7 @@ def _check_for_max_iterations(self, session, organisation_id, agent_config, agen "calls": db_agent_execution.num_of_calls}, db_agent_execution.agent_id, organisation_id) session.commit() - logger.info("ITERATION_LIMIT_CROSSED") + #logger.info("ITERATION_LIMIT_CROSSED") return True return False @@ -169,11 +169,11 @@ def execute_waiting_workflows(self): step_wait = AgentWorkflowStepWait.find_by_id(session, workflow_step.action_reference_id) if step_wait is not None: wait_time = step_wait.delay if not None else 0 - logger.info(f"Agent Execution ID: {agent_execution.id}") - logger.info(f"Wait time: {wait_time}") - logger.info(f"Wait begin time: {step_wait.wait_begin_time}") - logger.info(f"Current time: {datetime.now()}") - logger.info(f"Wait Difference : {(datetime.now() - step_wait.wait_begin_time).total_seconds()}") + #logger.info(f"Agent Execution 
ID: {agent_execution.id}") + #logger.info(f"Wait time: {wait_time}") + #logger.info(f"Wait begin time: {step_wait.wait_begin_time}") + #logger.info(f"Current time: {datetime.now()}") + #logger.info(f"Wait Difference : {(datetime.now() - step_wait.wait_begin_time).total_seconds()}") if ((datetime.now() - step_wait.wait_begin_time).total_seconds() > wait_time and step_wait.status == AgentWorkflowStepWaitStatus.WAITING.value): agent_execution.status = AgentExecutionStatus.RUNNING.value diff --git a/superagi/lib/logger.py b/superagi/lib/logger.py index f600ff11c..f67d892a0 100644 --- a/superagi/lib/logger.py +++ b/superagi/lib/logger.py @@ -56,9 +56,9 @@ def debug(self, message, *args): self.logger.debug(*args) def info(self, message, *args): - self.logger.info(message) + pass  # self.logger.info(message) if args: - self.logger.info(*args) + pass  # self.logger.info(*args) def warning(self, message, *args): self.logger.warning(message) diff --git a/superagi/llms/google_palm.py b/superagi/llms/google_palm.py index 928842f94..149f64055 100644 --- a/superagi/llms/google_palm.py +++ b/superagi/llms/google_palm.py @@ -74,7 +74,7 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT # #print(completion.result) return {"response": completion, "content": completion.result} except Exception as exception: - logger.info("Google palm Exception:", exception) + #logger.info("Google palm Exception:", exception) return {"error": "ERROR_GOOGLE_PALM", "message": "Google palm exception"} def verify_access_key(self): @@ -88,7 +88,7 @@ def verify_access_key(self): models = palm.list_models() return True except Exception as exception: - logger.info("Google palm Exception:", exception) + #logger.info("Google palm Exception:", exception) return False def get_models(self): @@ -102,5 +102,5 @@ def get_models(self): models_supported = ["chat-bison-001"] return models_supported except Exception as exception: - logger.info("Google palm Exception:", exception) + #logger.info("Google
palm Exception:", exception) return [] diff --git a/superagi/llms/hugging_face.py b/superagi/llms/hugging_face.py index 34d8a6f80..2976fab5f 100644 --- a/superagi/llms/hugging_face.py +++ b/superagi/llms/hugging_face.py @@ -93,7 +93,7 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT } response = requests.post(self.end_point, headers=self.headers, data=json.dumps(payload)) completion = json.loads(response.content.decode("utf-8")) - logger.info(f"{completion=}") + #logger.info(f"{completion=}") if self.task == Tasks.TEXT_GENERATION: content = completion[0]["generated_text"] else: diff --git a/superagi/llms/local_llm.py b/superagi/llms/local_llm.py index a146d7daa..3c25c6af4 100644 --- a/superagi/llms/local_llm.py +++ b/superagi/llms/local_llm.py @@ -51,11 +51,11 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT response = self.llm_model.create_chat_completion(messages=messages, functions=None, function_call=None, temperature=self.temperature, top_p=self.top_p, max_tokens=int(max_tokens), presence_penalty=self.presence_penalty, frequency_penalty=self.frequency_penalty, grammar=self.llm_grammar) content = response["choices"][0]["message"]["content"] - logger.info(content) + #logger.info(content) return {"response": response, "content": content} except Exception as exception: - logger.info("Exception:", exception) + #logger.info("Exception:", exception) return {"error": "ERROR", "message": "Error: "+str(exception)} def get_source(self): diff --git a/superagi/llms/openai.py b/superagi/llms/openai.py index 454f7ce19..be7a44d7e 100644 --- a/superagi/llms/openai.py +++ b/superagi/llms/openai.py @@ -76,16 +76,16 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT content = response.choices[0].message["content"] return {"response": response, "content": content} except AuthenticationError as auth_error: - logger.info("OpenAi AuthenticationError:", auth_error) + 
#logger.info("OpenAi AuthenticationError:", auth_error) return {"error": "ERROR_AUTHENTICATION", "message": "Authentication error please check the api keys: "+str(auth_error)} except RateLimitError as api_error: - logger.info("OpenAi RateLimitError:", api_error) + #logger.info("OpenAi RateLimitError:", api_error) return {"error": "ERROR_RATE_LIMIT", "message": "Openai rate limit exceeded: "+str(api_error)} except InvalidRequestError as invalid_request_error: - logger.info("OpenAi InvalidRequestError:", invalid_request_error) + #logger.info("OpenAi InvalidRequestError:", invalid_request_error) return {"error": "ERROR_INVALID_REQUEST", "message": "Openai invalid request error: "+str(invalid_request_error)} except Exception as exception: - logger.info("OpenAi Exception:", exception) + #logger.info("OpenAi Exception:", exception) return {"error": "ERROR_OPENAI", "message": "Open ai exception: "+str(exception)} def verify_access_key(self): @@ -99,7 +99,7 @@ def verify_access_key(self): models = openai.Model.list() return True except Exception as exception: - logger.info("OpenAi Exception:", exception) + #logger.info("OpenAi Exception:", exception) return False def get_models(self): @@ -116,5 +116,5 @@ def get_models(self): models = [model for model in models if model in models_supported] return models except Exception as exception: - logger.info("OpenAi Exception:", exception) + #logger.info("OpenAi Exception:", exception) return [] diff --git a/superagi/llms/replicate.py b/superagi/llms/replicate.py index d100b23cd..78e863920 100644 --- a/superagi/llms/replicate.py +++ b/superagi/llms/replicate.py @@ -88,7 +88,7 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT return {"error": "Replicate model didn't return any output."} #print(final_output) #print(temp_output) - logger.info("Replicate response:", final_output) + #logger.info("Replicate response:", final_output) return {"response": temp_output, "content": final_output} except 
Exception as exception: diff --git a/superagi/models/agent.py b/superagi/models/agent.py index 253f92fa5..bf900ad2d 100644 --- a/superagi/models/agent.py +++ b/superagi/models/agent.py @@ -137,12 +137,12 @@ def create_agent_with_config(cls, db, agent_with_config): db.session.commit() agent_workflow = AgentWorkflow.find_by_name(session=db.session, name=agent_with_config.agent_workflow) - logger.info("Agent workflow:", str(agent_workflow)) + #logger.info("Agent workflow:", str(agent_workflow)) db_agent.agent_workflow_id = agent_workflow.id # # if agent_with_config.agent_type == "Don't Maintain Task Queue": # agent_workflow = db.session.query(AgentWorkflow).filter(AgentWorkflow.name == "Goal Based Agent").first() - # logger.info(agent_workflow) + # #logger.info(agent_workflow) # db_agent.agent_workflow_id = agent_workflow.id # elif agent_with_config.agent_type == "Maintain Task Queue": # agent_workflow = db.session.query(AgentWorkflow).filter( diff --git a/superagi/models/agent_template.py b/superagi/models/agent_template.py index 64d7cd579..053ca1f11 100644 --- a/superagi/models/agent_template.py +++ b/superagi/models/agent_template.py @@ -163,7 +163,7 @@ def clone_agent_template_from_marketplace(cls, db, organisation_id: int, agent_t agent_workflow = db.session.query(AgentWorkflow).filter( AgentWorkflow.name == agent_template["agent_workflow_name"]).first() # keeping it backward compatible - logger.info("agent_workflow:" + str(agent_template["agent_workflow_name"])) + #logger.info("agent_workflow:" + str(agent_template["agent_workflow_name"])) if not agent_workflow: workflow_id = AgentTemplate.fetch_iteration_agent_template_mapping(db.session, agent_template["agent_workflow_name"]) agent_workflow = db.session.query(AgentWorkflow).filter(AgentWorkflow.id == workflow_id).first() diff --git a/superagi/models/db.py b/superagi/models/db.py index c6844cfd4..86843273a 100644 --- a/superagi/models/db.py +++ b/superagi/models/db.py @@ -45,7 +45,7 @@ def connect_db(): # Test 
the connection try: connection = engine.connect() - logger.info("Connected to the database! @ " + db_url) + #logger.info("Connected to the database! @ " + db_url) connection.close() except Exception as e: logger.error(f"Unable to connect to the database:{e}") diff --git a/superagi/models/models_config.py b/superagi/models/models_config.py index 03833ef05..1bf3063c2 100644 --- a/superagi/models/models_config.py +++ b/superagi/models/models_config.py @@ -127,7 +127,7 @@ def fetch_api_key(cls, session, organisation_id, model_provider): api_key_data = session.query(ModelsConfig.id, ModelsConfig.provider, ModelsConfig.api_key).filter( and_(ModelsConfig.org_id == organisation_id, ModelsConfig.provider == model_provider)).first() - logger.info(api_key_data) + #logger.info(api_key_data) if api_key_data is None: return [] elif api_key_data.provider == 'Local LLM': diff --git a/superagi/models/workflows/agent_workflow_step.py b/superagi/models/workflows/agent_workflow_step.py index b7a3ada16..5c851f859 100644 --- a/superagi/models/workflows/agent_workflow_step.py +++ b/superagi/models/workflows/agent_workflow_step.py @@ -154,7 +154,7 @@ def find_or_create_tool_workflow_step(cls, session, agent_workflow_id: int, uniq def find_or_create_wait_workflow_step(cls, session, agent_workflow_id: int, unique_id: str, wait_description: str, delay: int, step_type="NORMAL"): """ Find or create a wait workflow step""" - logger.info("Finding or creating wait step") + #logger.info("Finding or creating wait step") workflow_step = session.query(AgentWorkflowStep).filter( AgentWorkflowStep.agent_workflow_id == agent_workflow_id, AgentWorkflowStep.unique_id == unique_id).first() @@ -267,7 +267,7 @@ def fetch_next_step(cls, session, current_agent_step_id: int, step_response: str return "COMPLETE" return AgentWorkflowStep.find_by_unique_id(session, matching_steps[0]["step_id"]) - logger.info(f"Could not find next step for step_id: {current_agent_step_id} and step_response: {step_response}") + 
#logger.info(f"Could not find next step for step_id: {current_agent_step_id} and step_response: {step_response}") default_steps = [step for step in next_steps if str(step["step_response"]).lower() == "default"] if default_steps: diff --git a/superagi/resource_manager/file_manager.py b/superagi/resource_manager/file_manager.py index 4c20ba16d..e29084cd7 100644 --- a/superagi/resource_manager/file_manager.py +++ b/superagi/resource_manager/file_manager.py @@ -28,7 +28,7 @@ def write_binary_file(self, file_name: str, data): img.write(data) img.close() self.write_to_s3(file_name, final_path) - logger.info(f"Binary {file_name} saved successfully") + #logger.info(f"Binary {file_name} saved successfully") return f"Binary {file_name} saved successfully" except Exception as err: return f"Error write_binary_file: {err}" @@ -60,7 +60,7 @@ def write_file(self, file_name: str, content): file.write(content) file.close() self.write_to_s3(file_name, final_path) - logger.info(f"{file_name} - File written successfully") + #logger.info(f"{file_name} - File written successfully") return f"{file_name} - File written successfully" except Exception as err: return f"Error write_file: {err}" @@ -79,7 +79,7 @@ def write_csv_file(self, file_name: str, csv_data): writer = csv.writer(file, lineterminator="\n") writer.writerows(csv_data) self.write_to_s3(file_name, final_path) - logger.info(f"{file_name} - File written successfully") + #logger.info(f"{file_name} - File written successfully") return f"{file_name} - File written successfully" except Exception as err: return f"Error write_csv_file: {err}" @@ -100,7 +100,7 @@ def read_file(self, file_name: str): try: with open(final_path, mode="r") as file: content = file.read() - logger.info(f"{file_name} - File read successfully") + #logger.info(f"{file_name} - File read successfully") return content except Exception as err: return f"Error while reading file {file_name}: {err}" diff --git a/superagi/resource_manager/resource_manager.py 
b/superagi/resource_manager/resource_manager.py index a386914d3..950429687 100644 --- a/superagi/resource_manager/resource_manager.py +++ b/superagi/resource_manager/resource_manager.py @@ -80,7 +80,7 @@ def save_document_to_vector_store(self, documents: list, resource_id: str, mode_ """ from llama_index import VectorStoreIndex, StorageContext if ModelSourceType.GooglePalm.value in model_source or ModelSourceType.Replicate.value in model_source: - logger.info("Resource embedding not supported for Google Palm..") + #logger.info("Resource embedding not supported for Google Palm..") return import openai openai.api_key = get_config("OPENAI_API_KEY") or mode_api_key diff --git a/superagi/tools/apollo/apollo_search.py b/superagi/tools/apollo/apollo_search.py index eba744d4f..7330474ce 100644 --- a/superagi/tools/apollo/apollo_search.py +++ b/superagi/tools/apollo/apollo_search.py @@ -71,7 +71,7 @@ def _execute(self, person_titles: list[str], page: int = 1, per_page: int = 25, """ people_data = self.apollo_search_results(page, per_page, person_titles, num_of_employees, person_location, organization_domains) - logger.info(people_data) + #logger.info(people_data) people_list = [] if people_data and 'people' in people_data and len(people_data['people']) > 0: for person in people_data['people']: diff --git a/superagi/tools/code/improve_code.py b/superagi/tools/code/improve_code.py index a825ba47a..f57f77a05 100644 --- a/superagi/tools/code/improve_code.py +++ b/superagi/tools/code/improve_code.py @@ -54,7 +54,7 @@ def _execute(self) -> str: """ # Get all file names that the CodingTool has written file_names = self.resource_manager.get_files() - logger.info(file_names) + #logger.info(file_names) # Loop through each file for file_name in file_names: if '.txt' not in file_name and '.sh' not in file_name and '.json' not in file_name: @@ -82,12 +82,12 @@ def _execute(self) -> str: # Extract the response first response = result.get('response') if not response: - 
logger.info("RESPONSE NOT AVAILABLE") + pass  # logger.info("RESPONSE NOT AVAILABLE") # Now extract the choices from response choices = response.get('choices') if not choices: - logger.info("CHOICES NOT AVAILABLE") + pass  # logger.info("CHOICES NOT AVAILABLE") # Now you can safely extract the message content improved_content = choices[0]["message"]["content"] diff --git a/superagi/tools/code/write_code.py b/superagi/tools/code/write_code.py index a799fc656..c22c2abab 100644 --- a/superagi/tools/code/write_code.py +++ b/superagi/tools/code/write_code.py @@ -71,7 +71,7 @@ def _execute(self, code_description: str) -> str: spec_response = self.tool_response_manager.get_last_response("WriteSpecTool") if spec_response != "": prompt = prompt.replace("{spec}", "Use this specs for generating the code:\n" + spec_response) - logger.info(prompt) + #logger.info(prompt) messages = [{"role": "system", "content": prompt}] organisation = Agent.find_org_by_agent_id(session=self.toolkit_config.session, agent_id=self.agent_id) diff --git a/superagi/tools/code/write_test.py b/superagi/tools/code/write_test.py index 3b446fc36..f76cf71ae 100644 --- a/superagi/tools/code/write_test.py +++ b/superagi/tools/code/write_test.py @@ -83,7 +83,7 @@ def _execute(self, test_description: str, test_file_name: str) -> str: "Please generate unit tests based on the following specification description:\n" + spec_response) messages = [{"role": "system", "content": prompt}] - logger.info(prompt) + #logger.info(prompt) organisation = Agent.find_org_by_agent_id(self.toolkit_config.session, agent_id=self.agent_id) total_tokens = TokenCounter.count_message_tokens(messages, self.llm.get_model()) diff --git a/superagi/tools/github/delete_file.py b/superagi/tools/github/delete_file.py index d90153bca..6eeabf7c9 100644 --- a/superagi/tools/github/delete_file.py +++ b/superagi/tools/github/delete_file.py @@ -76,7 +76,7 @@ def _execute(self, repository_name: str, base_branch: str, file_name: str, commi if repository_owner != 
github_username: fork_response = github_helper.make_fork(repository_owner, repository_name, base_branch, headers) branch_response = github_helper.create_branch(repository_name, base_branch, head_branch, headers) - logger.info("branch_response", branch_response) + #logger.info("branch_response", branch_response) if branch_response == 201 or branch_response == 422: github_helper.sync_branch(github_username, repository_name, base_branch, head_branch, headers) diff --git a/superagi/tools/searx/search_scraper.py b/superagi/tools/searx/search_scraper.py index 837314bb0..fe841855a 100644 --- a/superagi/tools/searx/search_scraper.py +++ b/superagi/tools/searx/search_scraper.py @@ -42,7 +42,7 @@ def search(query): searx_url + "/search", params={"q": query}, headers={"User-Agent": "Mozilla/5.0 (X11; Linux i686; rv:109.0) Gecko/20100101 Firefox/114.0"} ) if res.status_code != 200: - logger.info(res.status_code, searx_url) + #logger.info(res.status_code, searx_url) raise Exception(f"Searx returned {res.status_code} status code") return res.text diff --git a/superagi/vector_store/redis.py b/superagi/vector_store/redis.py index c422c6dc7..d106f9fbf 100644 --- a/superagi/vector_store/redis.py +++ b/superagi/vector_store/redis.py @@ -129,8 +129,8 @@ def create_index(self): try: # check to see if index exists temp = self.redis_client.ft(self.index).info() - logger.info(temp) - logger.info("Index already exists!") + #logger.info(temp) + #logger.info("Index already exists!") except: vector_dimensions = self.embedding_model.get_embedding("sample") # schema diff --git a/superagi/worker.py b/superagi/worker.py index 854909211..d4dad9dd9 100644 --- a/superagi/worker.py +++ b/superagi/worker.py @@ -58,7 +58,7 @@ def execute_waiting_workflows(): """Check if wait time of wait workflow step is over and can be resumed.""" from superagi.jobs.agent_executor import AgentExecutor - logger.info("Executing waiting workflows job") + #logger.info("Executing waiting workflows job") 
AgentExecutor().execute_waiting_workflows() @app.task(name="initialize-schedule-agent", autoretry_for=(Exception,), retry_backoff=2, max_retries=5) @@ -75,7 +75,7 @@ def execute_agent(agent_execution_id: int, time): """Execute an agent step in background.""" from superagi.jobs.agent_executor import AgentExecutor handle_tools_import() - logger.info("Execute agent:" + str(time) + "," + str(agent_execution_id)) + #logger.info("Execute agent:" + str(time) + "," + str(agent_execution_id)) AgentExecutor().execute_next_step(agent_execution_id=agent_execution_id) @@ -104,7 +104,7 @@ def summarize_resource(agent_id: int, resource_id: int): else: documents = ResourceManager(str(agent_id)).create_llama_document(file_path) - logger.info("Summarize resource:" + str(agent_id) + "," + str(resource_id)) + #logger.info("Summarize resource:" + str(agent_id) + "," + str(resource_id)) resource_summarizer = ResourceSummarizer(session=session, agent_id=agent_id, model=agent_config["model"]) resource_summarizer.add_to_vector_store_and_create_summary(resource_id=resource_id, documents=documents) diff --git a/test.py b/test.py index 5a4fdcd61..31f6367da 100644 --- a/test.py +++ b/test.py @@ -44,7 +44,7 @@ def run_superagi_cli(agent_name=None, agent_description=None, agent_goals=None): session.add(organization) session.flush() # Flush pending changes to generate the agent's ID session.commit() - logger.info(organization) + #logger.info(organization) # Create default project associated with the organization project = Project(name='Default Project', description='Default project description', @@ -52,7 +52,7 @@ def run_superagi_cli(agent_name=None, agent_description=None, agent_goals=None): session.add(project) session.flush() # Flush pending changes to generate the agent's ID session.commit() - logger.info(project) + #logger.info(project) # Agent if agent_name is None: @@ -63,7 +63,7 @@ def run_superagi_cli(agent_name=None, agent_description=None, agent_goals=None): session.add(agent) 
session.flush() session.commit() - logger.info(agent) + #logger.info(agent) # Agent Config # Create Agent Configuration @@ -92,16 +92,16 @@ def run_superagi_cli(agent_name=None, agent_description=None, agent_goals=None): session.add_all(agent_configurations) session.commit() - logger.info("Agent Config : ") - logger.info(agent_configurations) + #logger.info("Agent Config : ") + #logger.info(agent_configurations) # Create agent execution in RUNNING state associated with the agent execution = AgentExecution(status='RUNNING', agent_id=agent.id, last_execution_time=datetime.utcnow()) session.add(execution) session.commit() - logger.info("Final Execution") - logger.info(execution) + #logger.info("Final Execution") + #logger.info(execution) execute_agent.delay(execution.id, datetime.now()) diff --git a/ui.py b/ui.py index a030a8dcc..66ad00f9e 100644 --- a/ui.py +++ b/ui.py @@ -8,7 +8,7 @@ def check_command(command, message): if not shutil.which(command): - logger.info(message) + #logger.info(message) sys.exit(1) @@ -33,11 +33,11 @@ def run_server(shell=False): def cleanup(api_process, ui_process, celery_process): - logger.info("Shutting down processes...") + #logger.info("Shutting down processes...") api_process.terminate() ui_process.terminate() celery_process.terminate() - logger.info("Processes terminated. Exiting.") + #logger.info("Processes terminated. Exiting.") sys.exit(1)