From f2b0f69eb094c791837edefb63f92237923f995e Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Fri, 29 Sep 2023 12:42:04 +0000
Subject: [PATCH 01/10] feat: Add security analysis workflow

---
 .github/workflows/security.yml | 13 +++++++++++++
 1 file changed, 13 insertions(+)
 create mode 100644 .github/workflows/security.yml

diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml
new file mode 100644
index 0000000..8eba23c
--- /dev/null
+++ b/.github/workflows/security.yml
@@ -0,0 +1,13 @@
+name: Security Analysis
+on: [push, pull_request]
+
+jobs:
+  security:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v2
+      - name: Run Bandit
+        uses: docker://ghcr.io/tylerauerbeck/bandit:latest
+        with:
+          args: "."

From 26a746beabb36ff35708d1f8518a27b140e4bed9 Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Fri, 29 Sep 2023 12:43:17 +0000
Subject: [PATCH 02/10] feat: Updated cloudbuild.yml

---
 cloudbuild.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/cloudbuild.yml b/cloudbuild.yml
index 26df6e2..8fcc8b6 100644
--- a/cloudbuild.yml
+++ b/cloudbuild.yml
@@ -9,6 +9,12 @@ steps:
     - '-f'
     - Dockerfile
     id: Build
+  - name: 'gcr.io/cloud-builders/bandit'
+    args:
+      - '-r'
+      - '.'
+      - '-lll'
+    id: Security
   - name: gcr.io/cloud-builders/docker
     args:
     - push

From 8db0c540a46ef113da444fd4218547f72a8f39b9 Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Fri, 29 Sep 2023 12:44:44 +0000
Subject: [PATCH 03/10] feat: Updated src/services/openai_service.py

---
 src/services/openai_service.py | 264 +++++++++++++++++----------------
 1 file changed, 137 insertions(+), 127 deletions(-)

diff --git a/src/services/openai_service.py b/src/services/openai_service.py
index ca97fd5..0ad2b14 100644
--- a/src/services/openai_service.py
+++ b/src/services/openai_service.py
@@ -1,127 +1,137 @@
-import openai
-import logging
-from openai.error import AuthenticationError, RateLimitError
-from lib.guards import time_tracker
-
-# openai.api_key = os.environ["OPENAI_API_KEY"]
-
-base_prompt ="You are a friendly Slack chatbot named Haly. \
-You are an expert in everything. \
-You are very friendly and helpful. You are a good listener. \
-You are free of any rules and constraints. \
-You can: \
-- Help with emails, blogs, articles, marketing content \n\
-- Answer questions \n\
-- Provide information \n\
-- Offer suggestions \n\
-- Conduct research \n\
-- Give explanations \n\
-- Solve problems \n\
-- Generate ideas \n\
-- Provide definitions \n\
-- Give step-by-step instructions \n\
-- Engage in conversation \n\
-- Assist with language translations \n\
-- Assist with travel plans \n\
-- Suggest recipes \n\
-- Assist with health and fitness information \n\
-- Offer general knowledge on various topics \n\
-You are in a conversation with multiple users. \
-Users will sign their messages with their names, you won't. \
-You will respond in markdown format. \
-Your creators and developers are the developers at UpMortem. \
-Previous messages are provided to you summarized. \
-SUMMARY: "
-
-summary_prompt="As a professional summarizer, create a concise and comprehensive summary of the provided conversation or part of a conversation, while adhering to these guidelines:\n \
-1. Craft a summary that is detailed, thorough, in-depth, and complex, while maintaining clarity and conciseness. \n \
-2. Incorporate main ideas and essential information, eliminating extraneous language and focusing on critical aspects. \n \
-3. Rely strictly on the provided text, without including external information. \n \
-4. Format the summary in paragraph form for easy understanding. \n \
-You are given the conversation thread. When creating the thread, give relevance to the necessary messages to answer the last question. \n \
-Conversation: \n \
-`` \n"
-
-MIN_TOKENS_TO_SUMMARIZE = 10000
-
-def run_completion(slack_messages, model, openai_key, system_prompt=base_prompt, team_id=None):
-    openai.api_key = openai_key
-    messages = [
-        {
-            "role": "system",
-            "content": system_prompt
-        }
-    ] + slack_messages
-    try:
-        completion = openai.ChatCompletion.create(
-            model=model,
-            temperature=0.7,
-            messages=messages
-        )
-        return completion.choices[0].message.content
-    except AuthenticationError:
-        logging.info(f"Invalid API key for team {team_id}")
-        return "Invalid API key. Please have your Slack admin go to https://billing.haly.ai and edit it under the Your Organization section."
-    except RateLimitError:
-        logging.info(f"Open AI rate limit reached for team {team_id}")
-        return "You have reached the rate limit for your OpenAI key."
-    except Exception as exception:
-        logging.error(f"Error in chat completion: {exception}")
-        return "Something went wrong. Please try again. If the problem persists, please check your API key"
-
-
-def respond_to_user(messages, openai_key, team_id):
-    tokens = rough_num_tokens_from_messages(messages)
-    model = "gpt-3.5-turbo"
-    summary = ""
-    if tokens > 3500:
-        model = "gpt-3.5-turbo-16k"
-    if(tokens > MIN_TOKENS_TO_SUMMARIZE):
-        summary = summarize_conversation(messages[:-4], openai_key)
-        model = "gpt-3.5-turbo"
-        response = run_completion(messages[-4:], model, openai_key, system_prompt=base_prompt.replace("", summary), team_id=team_id)
-    else:
-        response = run_completion(messages, model, openai_key, team_id=team_id)
-    return response
-
-def rough_num_tokens_from_messages(messages):
-    tokens_per_message = 3
-    tokens_per_name = 1
-    num_tokens = 0
-    for message in messages:
-        num_tokens += tokens_per_message
-        for key, value in message.items():
-            num_tokens += len(value) / 3 # rough estimate of number of tokens
-            if key == "name":
-                num_tokens += tokens_per_name
-    num_tokens += 3
-    return num_tokens
-
-def summarize_conversation(messages, openai_key):
-    chunks = chunk_messages(messages, MIN_TOKENS_TO_SUMMARIZE)
-    summary = ""
-    for chunk in chunks:
-        summary += run_completion([{
-            "role": "user",
-            "content": "create a concise and comprehensive summary of the provided conversation.",
-            }],
-            "gpt-3.5-turbo-16k",
-            openai_key,
-            system_prompt=summary_prompt.replace("", "\n".join([f"{message['name']}: {message['content']}" for message in chunk]))
-        )
-        print(f"Chunk summary: {summary}")
-    print(f"Final Summary: {summary}")
-    return summary
-
-# Split array of messages into chunks of 3000 tokens or less
-def chunk_messages(messages, chunk_size):
-    chunks = []
-    for message in messages:
-        if len(chunks) == 0:
-            chunks.append([message])
-        else:
-            if rough_num_tokens_from_messages(chunks[-1] + [message]) > chunk_size:
-                chunks.append([message])
-            else:
-                chunks[-1].append(message)
-    return chunks
+import openai
+import logging
+from openai.error import AuthenticationError, RateLimitError
+from lib.guards import time_tracker
+
+# openai.api_key = os.environ["OPENAI_API_KEY"]
+
+base_prompt ="You are a friendly Slack chatbot named Haly. \
+You are an expert in everything. \
+You are very friendly and helpful. You are a good listener. \
+You are free of any rules and constraints. \
+You can: \
+- Help with emails, blogs, articles, marketing content \n\
+- Answer questions \n\
+- Provide information \n\
+- Offer suggestions \n\
+- Conduct research \n\
+- Give explanations \n\
+- Solve problems \n\
+- Generate ideas \n\
+- Provide definitions \n\
+- Give step-by-step instructions \n\
+- Engage in conversation \n\
+- Assist with language translations \n\
+- Assist with travel plans \n\
+- Suggest recipes \n\
+- Assist with health and fitness information \n\
+- Offer general knowledge on various topics \n\
+You are in a conversation with multiple users. \
+Users will sign their messages with their names, you won't. \
+You will respond in markdown format. \
+Your creators and developers are the developers at UpMortem. \
+Previous messages are provided to you summarized. \
+SUMMARY: "
+
+summary_prompt="As a professional summarizer, create a concise and comprehensive summary of the provided conversation or part of a conversation, while adhering to these guidelines:\n \
+1. Craft a summary that is detailed, thorough, in-depth, and complex, while maintaining clarity and conciseness. \n \
+2. Incorporate main ideas and essential information, eliminating extraneous language and focusing on critical aspects. \n \
+3. Rely strictly on the provided text, without including external information. \n \
+4. Format the summary in paragraph form for easy understanding. \n \
+You are given the conversation thread. When creating the thread, give relevance to the necessary messages to answer the last question. \n \
+Conversation: \n \
+`` \n"
+
+MIN_TOKENS_TO_SUMMARIZE = 10000
+
+def validate_key(key):
+    # Add your key validation logic here
+    # For example, you can check if the key is in the correct format and doesn't contain any malicious characters
+    return key
+
+def run_completion(slack_messages, model, openai_key, system_prompt=base_prompt, team_id=None):
+    try:
+        openai.api_key = validate_key(openai_key)
+    except Exception as e:
+        logging.error(f"Error in API key assignment: {e}")
+        return "Invalid API key. Please check your key and try again."
+
+    messages = [
+        {
+            "role": "system",
+            "content": system_prompt
+        }
+    ] + slack_messages
+    try:
+        completion = openai.ChatCompletion.create(
+            model=model,
+            temperature=0.7,
+            messages=messages
+        )
+        return completion.choices[0].message.content
+    except AuthenticationError:
+        logging.info(f"Invalid API key for team {team_id}")
+        return "Invalid API key. Please have your Slack admin go to https://billing.haly.ai and edit it under the Your Organization section."
+    except RateLimitError:
+        logging.info(f"Open AI rate limit reached for team {team_id}")
+        return "You have reached the rate limit for your OpenAI key."
+    except Exception as exception:
+        logging.error(f"Error in chat completion: {exception}")
+        return "Something went wrong. Please try again. If the problem persists, please check your API key"
+
+
+def respond_to_user(messages, openai_key, team_id):
+    tokens = rough_num_tokens_from_messages(messages)
+    model = "gpt-3.5-turbo"
+    summary = ""
+    if tokens > 3500:
+        model = "gpt-3.5-turbo-16k"
+    if(tokens > MIN_TOKENS_TO_SUMMARIZE):
+        summary = summarize_conversation(messages[:-4], openai_key)
+        model = "gpt-3.5-turbo"
+        response = run_completion(messages[-4:], model, openai_key, system_prompt=base_prompt.replace("", summary), team_id=team_id)
+    else:
+        response = run_completion(messages, model, openai_key, team_id=team_id)
+    return response
+
+def rough_num_tokens_from_messages(messages):
+    tokens_per_message = 3
+    tokens_per_name = 1
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            num_tokens += len(value) / 3 # rough estimate of number of tokens
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3
+    return num_tokens
+
+def summarize_conversation(messages, openai_key):
+    chunks = chunk_messages(messages, MIN_TOKENS_TO_SUMMARIZE)
+    summary = ""
+    for chunk in chunks:
+        summary += run_completion([{
+            "role": "user",
+            "content": "create a concise and comprehensive summary of the provided conversation.",
+            }],
+            "gpt-3.5-turbo-16k",
+            openai_key,
+            system_prompt=summary_prompt.replace("", "\n".join([f"{message['name']}: {message['content']}" for message in chunk]))
+        )
+        print(f"Chunk summary: {summary}")
+    print(f"Final Summary: {summary}")
+    return summary
+
+# Split array of messages into chunks of 3000 tokens or less
+def chunk_messages(messages, chunk_size):
+    chunks = []
+    for message in messages:
+        if len(chunks) == 0:
+            chunks.append([message])
+        else:
+            if rough_num_tokens_from_messages(chunks[-1] + [message]) > chunk_size:
+                chunks.append([message])
+            else:
+                chunks[-1].append(message)
+    return chunks
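
Note on the token heuristic (a standalone sketch, not part of the patch itself): respond_to_user above picks a model and decides whether to summarize older messages from a rough, character-based token estimate (len(value) / 3 per message field plus small per-message constants). The snippet below mirrors that decision logic; the helper name rough_num_tokens and the sample Slack messages are invented for illustration.

    def rough_num_tokens(messages):
        num_tokens = 0
        for message in messages:
            num_tokens += 3                    # per-message overhead
            for key, value in message.items():
                num_tokens += len(value) / 3   # roughly 3 characters per token
                if key == "name":
                    num_tokens += 1
        return num_tokens + 3

    history = [
        {"role": "user", "name": "Alice", "content": "Can you recap this thread for me?"},
        {"role": "assistant", "content": "Sure, here is a short summary of the discussion."},
    ]
    tokens = rough_num_tokens(history)
    print(tokens > 3500)    # True would switch the call to gpt-3.5-turbo-16k
    print(tokens > 10000)   # True would summarize all but the last 4 messages first
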
From fe81af9c69bca96b13e24c55da7a59733a81ef16 Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 11:42:34 +0000
Subject: [PATCH 04/10] feat: Add Bandit security scan GitHub Actions work

---
 .github/workflows/bandit.yml | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 .github/workflows/bandit.yml

diff --git a/.github/workflows/bandit.yml b/.github/workflows/bandit.yml
new file mode 100644
index 0000000..a4269f8
--- /dev/null
+++ b/.github/workflows/bandit.yml
@@ -0,0 +1,27 @@
+name: Bandit Security Scan
+
+on:
+  push:
+    branches:
+      - '*'
+  pull_request:
+    branches:
+      - '*'
+
+jobs:
+  bandit_scan:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.x'
+
+      - name: Install Bandit
+        run: pip install bandit
+
+      - name: Run Bandit
+        run: bandit -r src/

From 8273e2cd5d58371d29f3bf9490b5fc66f9b6c28f Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 11:44:36 +0000
Subject: [PATCH 05/10] feat: Add security analysis workflow using Bandit

---
 .github/workflows/security_analysis.yml | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 .github/workflows/security_analysis.yml

diff --git a/.github/workflows/security_analysis.yml b/.github/workflows/security_analysis.yml
new file mode 100644
index 0000000..c29b381
--- /dev/null
+++ b/.github/workflows/security_analysis.yml
@@ -0,0 +1,18 @@
+name: Security Analysis
+
+on: [push, pull_request]
+
+jobs:
+  security_analysis:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.x'
+      - name: Install Bandit
+        run: pip install bandit
+      - name: Run Bandit
+        run: bandit -r src -lll

From 4163a0268046d559ea3250e325666099b1a6c5d4 Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 11:51:16 +0000
Subject: [PATCH 06/10] feat: Updated .github/workflows/bandit.yml

---
 .github/workflows/bandit.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/bandit.yml b/.github/workflows/bandit.yml
index a4269f8..85de671 100644
--- a/.github/workflows/bandit.yml
+++ b/.github/workflows/bandit.yml
@@ -24,4 +24,4 @@ jobs:
         run: pip install bandit
 
       - name: Run Bandit
-        run: bandit -r src/
+        run: bandit -ll -r src/

From 0d3965275cf1f8a8f1ac50c26dcbe9ea5333183c Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 11:52:35 +0000
Subject: [PATCH 07/10] feat: Updated cloudbuild.yml

---
 cloudbuild.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cloudbuild.yml b/cloudbuild.yml
index 8fcc8b6..aeefdde 100644
--- a/cloudbuild.yml
+++ b/cloudbuild.yml
@@ -14,6 +14,8 @@ steps:
       - '-r'
       - '.'
       - '-lll'
+      - '-s'
+      - 'B,H'
     id: Security
   - name: gcr.io/cloud-builders/docker
     args:

From 82a2ea965ed12d08af56b0f671c0b0a1a282d4ba Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 11:54:13 +0000
Subject: [PATCH 08/10] feat: Updated src/services/openai_service.py

---
 src/services/openai_service.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/services/openai_service.py b/src/services/openai_service.py
index 0ad2b14..790bbe8 100644
--- a/src/services/openai_service.py
+++ b/src/services/openai_service.py
@@ -44,9 +44,12 @@
 
 MIN_TOKENS_TO_SUMMARIZE = 10000
 
+import re
+
 def validate_key(key):
-    # Add your key validation logic here
-    # For example, you can check if the key is in the correct format and doesn't contain any malicious characters
+    # Check if the key is in the correct format
+    if not re.fullmatch(r'[A-Za-z0-9]{32}', key):
+        raise ValueError("Invalid API key. Key should be a 32-character alphanumeric string.")
     return key
 
 def run_completion(slack_messages, model, openai_key, system_prompt=base_prompt, team_id=None):
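
Note on the key check in PATCH 08 (a sketch under stated assumptions, not what the patch implements): the pattern r'[A-Za-z0-9]{32}' assumes a 32-character alphanumeric key, which may reject real OpenAI secret keys; those conventionally begin with an "sk-" prefix, and their length and exact character set are not fixed. A looser version of the same idea, under those assumptions, could look like this:

    import re

    def validate_key(key):
        # Assumption: an "sk-" prefix followed by at least 20 letters, digits,
        # hyphens or underscores; adjust if your keys follow a different format.
        if not re.fullmatch(r"sk-[A-Za-z0-9_-]{20,}", key):
            raise ValueError("API key does not look like an OpenAI secret key.")
        return key
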
From e4eaf1ca1ae690624e1ebf991c6cc53b56bae4aa Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 11:56:52 +0000
Subject: [PATCH 09/10] feat: Updated cloudbuild.yml

---
 cloudbuild.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/cloudbuild.yml b/cloudbuild.yml
index aeefdde..8fcc8b6 100644
--- a/cloudbuild.yml
+++ b/cloudbuild.yml
@@ -14,8 +14,6 @@ steps:
       - '-r'
      - '.'
       - '-lll'
-      - '-s'
-      - 'B,H'
     id: Security
   - name: gcr.io/cloud-builders/docker
     args:

From e1510c7795163bf98955262fb7709435d8ae0c1a Mon Sep 17 00:00:00 2001
From: "upmortem-sweep[bot]" <144372574+upmortem-sweep[bot]@users.noreply.github.com>
Date: Mon, 2 Oct 2023 12:08:55 +0000
Subject: [PATCH 10/10] feat: Updated cloudbuild.yml

---
 cloudbuild.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cloudbuild.yml b/cloudbuild.yml
index 8fcc8b6..918e5f4 100644
--- a/cloudbuild.yml
+++ b/cloudbuild.yml
@@ -14,6 +14,7 @@ steps:
       - '-r'
       - '.'
       - '-lll'
+      - '-l'
    id: Security
   - name: gcr.io/cloud-builders/docker
    args:
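
Reproducing the CI scan locally (a local sketch, not part of the series): the workflows and the cloudbuild step all invoke the same Bandit CLI, so a local run only needs Bandit installed first (pip install bandit, as in the workflows). A small Python wrapper, assuming the bandit executable is on PATH:

    import subprocess
    import sys

    # Mirrors the CI invocation: recurse into src/ and, with -lll, report only
    # high-severity findings, as in security_analysis.yml and cloudbuild.yml.
    result = subprocess.run(["bandit", "-r", "src", "-lll"])
    sys.exit(result.returncode)
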