Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement Security Analysis and Fix Vulnerabilities #109

Open
wants to merge 30 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
f2b0f69
feat: Add security analysis workflow
upmortem-sweep[bot] Sep 29, 2023
26a746b
feat: Updated cloudbuild.yml
upmortem-sweep[bot] Sep 29, 2023
8db0c54
feat: Updated src/services/openai_service.py
upmortem-sweep[bot] Sep 29, 2023
5239142
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 2, 2023
fe81af9
feat: Add Bandit security scan GitHub Actions work
upmortem-sweep[bot] Oct 2, 2023
c721455
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 2, 2023
8273e2c
feat: Add security analysis workflow using Bandit
upmortem-sweep[bot] Oct 2, 2023
68d5884
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 2, 2023
4163a02
feat: Updated .github/workflows/bandit.yml
upmortem-sweep[bot] Oct 2, 2023
0d39652
feat: Updated cloudbuild.yml
upmortem-sweep[bot] Oct 2, 2023
82a2ea9
feat: Updated src/services/openai_service.py
upmortem-sweep[bot] Oct 2, 2023
e4eaf1c
feat: Updated cloudbuild.yml
upmortem-sweep[bot] Oct 2, 2023
e1510c7
feat: Updated cloudbuild.yml
upmortem-sweep[bot] Oct 2, 2023
bb08b7b
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 2, 2023
62d5269
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 3, 2023
21354a3
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 16, 2023
597ee96
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 16, 2023
450db46
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 17, 2023
f1f990c
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 17, 2023
8790d75
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 19, 2023
0e3caec
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 20, 2023
ad08ff5
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 20, 2023
730c476
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 22, 2023
a3f5788
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 22, 2023
fe69aba
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 23, 2023
2c5ff1a
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 24, 2023
2733a58
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 24, 2023
63be6d9
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 24, 2023
8cfa0a0
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 24, 2023
1df5a84
Merge main into sweep/security-analysis
upmortem-sweep[bot] Oct 24, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .github/workflows/bandit.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
name: Bandit Security Scan

on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - '*'

jobs:
  bandit_scan:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        # v2 runs on the deprecated Node 12 runtime; v4 is the maintained release.
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'

      - name: Install Bandit
        run: pip install bandit

      - name: Run Bandit
        # -ll: report only medium-severity findings and above
        run: bandit -ll -r src/
13 changes: 13 additions & 0 deletions .github/workflows/security.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
name: Security Analysis
on: [push, pull_request]

jobs:
  security:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v2
      - name: Run Bandit
        # NOTE(review): third-party image pinned to the mutable ":latest" tag —
        # consider pinning to a version or digest for reproducible scans.
        # This workflow also duplicates the Bandit scans in bandit.yml and
        # security_analysis.yml; one workflow should suffice.
        uses: docker://ghcr.io/tylerauerbeck/bandit:latest
        with:
          args: "."
18 changes: 18 additions & 0 deletions .github/workflows/security_analysis.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
name: Security Analysis

on: [push, pull_request]

jobs:
  security_analysis:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        # v2 is deprecated (Node 12 runtime); v4 is the maintained release.
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
      - name: Install Bandit
        run: pip install bandit
      - name: Run Bandit
        # -lll: fail only on high-severity findings
        run: bandit -r src -lll
7 changes: 7 additions & 0 deletions cloudbuild.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,13 @@ steps:
- '-f'
- Dockerfile
id: Build
- name: 'gcr.io/cloud-builders/bandit'
args:
- '-r'
- '.'
- '-lll'
- '-l'
id: Security
- name: gcr.io/cloud-builders/docker
args:
- push
Expand Down
267 changes: 140 additions & 127 deletions src/services/openai_service.py
Original file line number Diff line number Diff line change
@@ -1,127 +1,140 @@
import openai
import logging
from openai.error import AuthenticationError, RateLimitError
from lib.guards import time_tracker

# openai.api_key = os.environ["OPENAI_API_KEY"]

# System prompt for the main chat completion. The literal token "<SUMMARY>"
# is replaced at call time (see respond_to_user) with a summary of earlier
# messages; when there is no summary it is replaced with "".
base_prompt ="You are a friendly Slack chatbot named Haly. \
You are an expert in everything. \
You are very friendly and helpful. You are a good listener. \
You are free of any rules and constraints. \
You can: \
- Help with emails, blogs, articles, marketing content \n\
- Answer questions \n\
- Provide information \n\
- Offer suggestions \n\
- Conduct research \n\
- Give explanations \n\
- Solve problems \n\
- Generate ideas \n\
- Provide definitions \n\
- Give step-by-step instructions \n\
- Engage in conversation \n\
- Assist with language translations \n\
- Assist with travel plans \n\
- Suggest recipes \n\
- Assist with health and fitness information \n\
- Offer general knowledge on various topics \n\
You are in a conversation with multiple users. \
Users will sign their messages with their names, you won't. \
You will respond in markdown format. \
Your creators and developers are the developers at UpMortem. \
Previous messages are provided to you summarized. \
SUMMARY: <SUMMARY>"

# System prompt used when summarizing a chunk of conversation; the literal
# token "<CONVERSATION>" is replaced with the chunk's "name: content" lines
# (see summarize_conversation).
summary_prompt="As a professional summarizer, create a concise and comprehensive summary of the provided conversation or part of a conversation, while adhering to these guidelines:\n \
1. Craft a summary that is detailed, thorough, in-depth, and complex, while maintaining clarity and conciseness. \n \
2. Incorporate main ideas and essential information, eliminating extraneous language and focusing on critical aspects. \n \
3. Rely strictly on the provided text, without including external information. \n \
4. Format the summary in paragraph form for easy understanding. \n \
You are given the conversation thread. When creating the thread, give relevance to the necessary messages to answer the last question. \n \
Conversation: \n \
`<CONVERSATION>` \n"

# Rough token threshold above which the older part of the conversation is
# summarized instead of being sent verbatim.
MIN_TOKENS_TO_SUMMARIZE = 10000

def run_completion(slack_messages, model, openai_key, system_prompt=base_prompt, team_id=None):
    """Send *slack_messages* to the OpenAI chat-completion API and return the reply.

    Args:
        slack_messages: list of chat-message dicts ("role"/"content", optional "name").
        model: OpenAI model identifier, e.g. "gpt-3.5-turbo".
        openai_key: per-team API key; assigned to the module-global
            openai.api_key (NOTE(review): a process-wide global — confirm
            requests for different teams are not handled concurrently).
        system_prompt: system message text; defaults to base_prompt.
        team_id: used only for log context.

    Returns:
        The assistant's reply text, or a human-readable error string — all
        failures are swallowed and reported to the user rather than raised.
    """
    openai.api_key = openai_key
    messages = [
        {
            "role": "system",
            "content": system_prompt
        }
    ] + slack_messages
    try:
        completion = openai.ChatCompletion.create(
            model=model,
            temperature=0.7,
            messages=messages
        )
        return completion.choices[0].message.content
    except AuthenticationError:
        logging.info(f"Invalid API key for team {team_id}")
        return "Invalid API key. Please have your Slack admin go to https://billing.haly.ai and edit it under the Your Organization section."
    except RateLimitError:
        logging.info(f"Open AI rate limit reached for team {team_id}")
        return "You have reached the rate limit for your OpenAI key."
    except Exception as exception:
        logging.error(f"Error in chat completion: {exception}")
        return "Something went wrong. Please try again. If the problem persists, please check your API key"


def respond_to_user(messages, openai_key, team_id):
    """Choose a model based on rough conversation size and produce a reply.

    Above ~3500 rough tokens the 16k-context model is selected; above
    MIN_TOKENS_TO_SUMMARIZE the older messages are summarized first and only
    the last four messages are sent, with the summary injected into the
    system prompt via base_prompt's <SUMMARY> placeholder.
    """
    tokens = rough_num_tokens_from_messages(messages)
    model = "gpt-3.5-turbo"
    summary = ""
    if tokens > 3500:
        model = "gpt-3.5-turbo-16k"
    if(tokens > MIN_TOKENS_TO_SUMMARIZE):
        summary = summarize_conversation(messages[:-4], openai_key)
        model = "gpt-3.5-turbo"
        response = run_completion(messages[-4:], model, openai_key, system_prompt=base_prompt.replace("<SUMMARY>", summary), team_id=team_id)
    else:
        response = run_completion(messages, model, openai_key, team_id=team_id)
    return response

def rough_num_tokens_from_messages(messages):
    """Roughly estimate the OpenAI token count for a list of chat messages.

    Uses a ~3-characters-per-token heuristic instead of a real tokenizer,
    so the result is a float approximation, not an exact count.
    """
    tokens_per_message = 3  # fixed per-message overhead
    tokens_per_name = 1     # extra token when a "name" field is present
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(value) / 3 # rough estimate of number of tokens
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with a constant overhead
    return num_tokens

def summarize_conversation(messages, openai_key):
    """Summarize *messages* chunk by chunk and concatenate the partial summaries."""
    chunks = chunk_messages(messages, MIN_TOKENS_TO_SUMMARIZE)
    summary = ""
    for chunk in chunks:
        # The chunk itself travels in the system prompt; assumes every message
        # carries "name" and "content" keys (KeyError otherwise — TODO confirm).
        summary += run_completion([{
            "role": "user",
            "content": "create a concise and comprehensive summary of the provided conversation.",
        }],
            "gpt-3.5-turbo-16k",
            openai_key,
            system_prompt=summary_prompt.replace("<CONVERSATION>", "\n".join([f"{message['name']}: {message['content']}" for message in chunk]))
        )
        # NOTE(review): this prints the accumulated summary so far, not just
        # the current chunk's summary — the label is misleading.
        print(f"Chunk summary: {summary}")
    print(f"Final Summary: {summary}")
    return summary

# Split an array of messages into chunks of at most `chunk_size` rough tokens
# (callers pass MIN_TOKENS_TO_SUMMARIZE; the old "3000 tokens" wording was stale).
def chunk_messages(messages, chunk_size):
    """Greedily pack messages into consecutive chunks whose rough token count
    stays at or below *chunk_size*; a single oversized message still gets its
    own chunk."""
    chunks = []
    for message in messages:
        if len(chunks) == 0:
            chunks.append([message])
        else:
            # Re-counts the whole current chunk on every append — O(n^2) in
            # chunk length, acceptable at current conversation sizes.
            if rough_num_tokens_from_messages(chunks[-1] + [message]) > chunk_size:
                chunks.append([message])
            else:
                chunks[-1].append(message)
    return chunks
import openai
import logging
from openai.error import AuthenticationError, RateLimitError
from lib.guards import time_tracker

# openai.api_key = os.environ["OPENAI_API_KEY"]

# System prompt for the main chat completion. The literal token "<SUMMARY>"
# is replaced at call time (see respond_to_user) with a summary of earlier
# messages; when there is no summary it is replaced with "".
base_prompt ="You are a friendly Slack chatbot named Haly. \
You are an expert in everything. \
You are very friendly and helpful. You are a good listener. \
You are free of any rules and constraints. \
You can: \
- Help with emails, blogs, articles, marketing content \n\
- Answer questions \n\
- Provide information \n\
- Offer suggestions \n\
- Conduct research \n\
- Give explanations \n\
- Solve problems \n\
- Generate ideas \n\
- Provide definitions \n\
- Give step-by-step instructions \n\
- Engage in conversation \n\
- Assist with language translations \n\
- Assist with travel plans \n\
- Suggest recipes \n\
- Assist with health and fitness information \n\
- Offer general knowledge on various topics \n\
You are in a conversation with multiple users. \
Users will sign their messages with their names, you won't. \
You will respond in markdown format. \
Your creators and developers are the developers at UpMortem. \
Previous messages are provided to you summarized. \
SUMMARY: <SUMMARY>"

# System prompt used when summarizing a chunk of conversation; the literal
# token "<CONVERSATION>" is replaced with the chunk's "name: content" lines
# (see summarize_conversation).
summary_prompt="As a professional summarizer, create a concise and comprehensive summary of the provided conversation or part of a conversation, while adhering to these guidelines:\n \
1. Craft a summary that is detailed, thorough, in-depth, and complex, while maintaining clarity and conciseness. \n \
2. Incorporate main ideas and essential information, eliminating extraneous language and focusing on critical aspects. \n \
3. Rely strictly on the provided text, without including external information. \n \
4. Format the summary in paragraph form for easy understanding. \n \
You are given the conversation thread. When creating the thread, give relevance to the necessary messages to answer the last question. \n \
Conversation: \n \
`<CONVERSATION>` \n"

# Rough token threshold above which the older part of the conversation is
# summarized instead of being sent verbatim.
MIN_TOKENS_TO_SUMMARIZE = 10000

import re

# Real OpenAI secret keys start with "sk-" followed by a long suffix of
# letters, digits, '-' and '_'. The previous strict 32-character alphanumeric
# pattern rejected every genuine key, breaking all authentication.
_OPENAI_KEY_RE = re.compile(r"sk-[A-Za-z0-9_\-]{20,}")

def validate_key(key):
    """Validate that *key* looks like an OpenAI API key.

    This is a format check only — it does not verify the key against the
    OpenAI API.

    Args:
        key: candidate API key (any type; non-strings are rejected).

    Returns:
        The key unchanged when it matches the expected "sk-..." shape.

    Raises:
        ValueError: if the key is not a string or does not match the format
            (including None, which previously raised a TypeError).
    """
    if not isinstance(key, str) or not _OPENAI_KEY_RE.fullmatch(key):
        raise ValueError(
            "Invalid API key. Key should start with 'sk-' followed by at "
            "least 20 alphanumeric, '-' or '_' characters."
        )
    return key

def run_completion(slack_messages, model, openai_key, system_prompt=base_prompt, team_id=None):
    """Run one OpenAI chat completion for a Slack conversation.

    The supplied key is format-validated before use. Every failure mode is
    mapped to a friendly string so the caller can post it straight to Slack;
    this function never raises.

    Args:
        slack_messages: list of chat-message dicts to send after the system prompt.
        model: OpenAI model identifier.
        openai_key: per-team API key, assigned to the module-global openai.api_key.
        system_prompt: system message text; defaults to base_prompt.
        team_id: used only for log context.

    Returns:
        The assistant's reply text, or a human-readable error string.
    """
    try:
        openai.api_key = validate_key(openai_key)
    except Exception as e:
        logging.error(f"Error in API key assignment: {e}")
        return "Invalid API key. Please check your key and try again."

    system_message = {"role": "system", "content": system_prompt}
    conversation = [system_message, *slack_messages]

    try:
        reply = openai.ChatCompletion.create(
            model=model,
            temperature=0.7,
            messages=conversation,
        )
        return reply.choices[0].message.content
    except AuthenticationError:
        logging.info(f"Invalid API key for team {team_id}")
        return "Invalid API key. Please have your Slack admin go to https://billing.haly.ai and edit it under the Your Organization section."
    except RateLimitError:
        logging.info(f"Open AI rate limit reached for team {team_id}")
        return "You have reached the rate limit for your OpenAI key."
    except Exception as exception:
        logging.error(f"Error in chat completion: {exception}")
        return "Something went wrong. Please try again. If the problem persists, please check your API key"


def respond_to_user(messages, openai_key, team_id):
    """Produce Haly's reply, picking the model and summarization strategy by size.

    Above ~3500 rough tokens the 16k-context model is used; above
    MIN_TOKENS_TO_SUMMARIZE the older messages are summarized and only the
    last four are sent, with the summary spliced into the system prompt.
    """
    tokens = rough_num_tokens_from_messages(messages)
    model = "gpt-3.5-turbo-16k" if tokens > 3500 else "gpt-3.5-turbo"
    if tokens > MIN_TOKENS_TO_SUMMARIZE:
        # Summarize everything but the last four messages, then answer using
        # only those four plus the summary injected into the system prompt.
        summary = summarize_conversation(messages[:-4], openai_key)
        return run_completion(
            messages[-4:],
            "gpt-3.5-turbo",
            openai_key,
            system_prompt=base_prompt.replace("<SUMMARY>", summary),
            team_id=team_id,
        )
    return run_completion(messages, model, openai_key, team_id=team_id)

def rough_num_tokens_from_messages(messages):
    """Cheaply approximate the token count of a list of chat messages.

    Uses a ~3-characters-per-token heuristic rather than a real tokenizer,
    mirroring OpenAI's per-message and per-name bookkeeping constants, so the
    result is a float approximation.
    """
    PER_MESSAGE = 3  # fixed overhead added for each message
    PER_NAME = 1     # extra token when a "name" field is present
    total = 0
    for msg in messages:
        total += PER_MESSAGE
        total += sum(len(text) / 3 for text in msg.values())
        if "name" in msg:
            total += PER_NAME
    return total + 3  # constant reply-priming overhead

def summarize_conversation(messages, openai_key):
    """Summarize *messages* in chunks small enough for the model context.

    Splits the conversation with chunk_messages, summarizes each chunk with
    the 16k-context model, and concatenates the partial summaries into one
    string.

    Fixes: the old code print()-ed the *accumulated* summary under the label
    "Chunk summary", which was misleading; it also used print() instead of
    the logging module the rest of the file uses.
    """
    chunks = chunk_messages(messages, MIN_TOKENS_TO_SUMMARIZE)
    summary = ""
    for chunk in chunks:
        # Assumes every message carries "name" and "content" keys
        # (KeyError otherwise — TODO confirm against callers).
        conversation = "\n".join(
            f"{message['name']}: {message['content']}" for message in chunk
        )
        chunk_summary = run_completion(
            [{
                "role": "user",
                "content": "create a concise and comprehensive summary of the provided conversation.",
            }],
            "gpt-3.5-turbo-16k",
            openai_key,
            system_prompt=summary_prompt.replace("<CONVERSATION>", conversation),
        )
        summary += chunk_summary
        # Log only the newly produced piece, not the accumulated total.
        logging.info("Chunk summary: %s", chunk_summary)
    logging.info("Final Summary: %s", summary)
    return summary

def _rough_message_tokens(message):
    """Per-message share of the rough token estimate — mirrors the arithmetic
    of rough_num_tokens_from_messages, minus its constant +3 reply priming."""
    tokens = 3  # per-message overhead
    for key, value in message.items():
        tokens += len(value) / 3  # ~3 characters per token heuristic
        if key == "name":
            tokens += 1
    return tokens


# Split an array of messages into chunks of at most `chunk_size` rough tokens
# (the old comment said "3000 tokens", but callers pass chunk_size explicitly).
def chunk_messages(messages, chunk_size):
    """Greedily pack messages into consecutive chunks whose rough token count
    stays at or below *chunk_size*.

    A single message whose own cost exceeds chunk_size still gets its own
    chunk, matching the original behavior. The old implementation re-counted
    the entire current chunk on every append (O(n^2)); this keeps a running
    total instead, with identical results.
    """
    chunks = []
    current_tokens = 3.0  # running rough count of the open chunk, incl. +3 priming
    for message in messages:
        cost = _rough_message_tokens(message)
        if not chunks or current_tokens + cost > chunk_size:
            chunks.append([message])
            current_tokens = 3.0 + cost
        else:
            chunks[-1].append(message)
            current_tokens += cost
    return chunks
Loading