diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000..29d87dc
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,30 @@
+name: Release
+
+on:
+  push:
+    tags:
+      - "v*.*.*"
+
+env:
+  REGISTRY: ghcr.io
+
+jobs:
+  build-push-image:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Log in to the Container registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build Docker image
+        run: |
+          docker build -t ${{ env.REGISTRY }}/${{ github.repository_owner }}/kube-copilot:${{ github.ref_name }} .
+          docker push ${{ env.REGISTRY }}/${{ github.repository_owner }}/kube-copilot:${{ github.ref_name }}
diff --git a/Makefile b/Makefile
index 2133b5c..be30a79 100644
--- a/Makefile
+++ b/Makefile
@@ -4,6 +4,10 @@
 run:
	poetry run kube-copilot $(ARGS)
 
+.PHONY: web
+web:
+	streamlit run web/Home.py
+
 .PHONY: build
 build:
	poetry build
@@ -12,9 +16,16 @@ build:
 install: build
	pip install --force-reinstall --no-deps dist/$(shell ls -t dist | head -n 1)
 
+.PHONY: versioning
+versioning:
+	yq -i ".image.tag = \"v$(shell poetry version -s)\"" ./helm/kube-copilot/values.yaml
+	yq -i ".version = \"$(shell poetry version -s)\"" ./helm/kube-copilot/Chart.yaml
+	yq -i ".appVersion = \"$(shell poetry version -s)\"" ./helm/kube-copilot/Chart.yaml
+
 .PHONY: publish
 publish: build
	poetry publish
+	gh release create v$(shell poetry version -s)
 
 .PHONY: release-helm
 release-helm:
@@ -32,8 +43,7 @@ release-helm:
	git checkout main
 
 .PHONY: release
-release: publish release-helm
-	gh release create v$(shell poetry version -s)
+release: versioning publish release-helm
 
 .PHONY: clean
 clean:
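Note on the workflow above: QEMU and Buildx are set up, but the build step runs a plain `docker build`, which only produces an image for the runner's own platform (linux/amd64). If multi-arch images are the intent, a sketch along the following lines would actually use the configured emulators; this is an assumption about intent, not part of this PR:

```yaml
      # Hypothetical replacement for the "Build Docker image" step above;
      # builds for both platforms and pushes in a single action.
      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ env.REGISTRY }}/${{ github.repository_owner }}/kube-copilot:${{ github.ref_name }}
```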
diff --git a/helm/README.md b/helm/README.md
index 67b8599..7fb7e75 100644
--- a/helm/README.md
+++ b/helm/README.md
@@ -55,6 +55,7 @@ helm install kube-copilot kube-copilot \
 | resources | object | `{}` |  |
 | service.port | int | `80` |  |
 | service.type | string | `"ClusterIP"` |  |
-| serviceAccount.create | bool | `true` |  |
+| serviceAccount.create | bool | `true` | Create the service account |
+| serviceAccount.admin | bool | `true` | Create the admin RBAC binding (read-only RBAC is used when set to false) |
 | serviceAccount.name | string | `"kube-copilot"` |  |
 | tolerations | list | `[]` |  |
diff --git a/helm/kube-copilot/Chart.yaml b/helm/kube-copilot/Chart.yaml
index 92c40f2..2fae832 100644
--- a/helm/kube-copilot/Chart.yaml
+++ b/helm/kube-copilot/Chart.yaml
@@ -1,7 +1,6 @@
 apiVersion: v2
 name: kube-copilot
 description: Kubernetes Copilot powered by OpenAI
-
 # A chart can be either an 'application' or a 'library' chart.
 #
 # Application charts are a collection of templates that can be packaged into versioned archives
@@ -11,14 +10,12 @@ description: Kubernetes Copilot powered by OpenAI
 # a dependency of application charts to inject those utilities and functions into the rendering
 # pipeline. Library charts do not define any templates and therefore cannot be deployed.
 type: application
-
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.18
-
+version: 0.1.19
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.1.18"
+appVersion: 0.1.19
diff --git a/helm/kube-copilot/templates/serviceaccount.yaml b/helm/kube-copilot/templates/serviceaccount.yaml
index 4803d79..3fa87a7 100644
--- a/helm/kube-copilot/templates/serviceaccount.yaml
+++ b/helm/kube-copilot/templates/serviceaccount.yaml
@@ -17,6 +17,18 @@ kind: ClusterRole
 metadata:
   name: {{ include "kube-copilot.serviceAccountName" . }}-reader
 rules:
+{{- if .Values.serviceAccount.admin }}
+- apiGroups:
+  - '*'
+  resources:
+  - '*'
+  verbs:
+  - '*'
+- nonResourceURLs:
+  - '*'
+  verbs:
+  - '*'
+{{- else }}
 - apiGroups:
   - '*'
   resources:
@@ -29,6 +41,7 @@ rules:
   verbs:
   - 'get'
   - 'list'
+{{- end }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
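One fix folded in above: the original `{{- if .Values.serviceAccount.admin -}}` chomps the newline on both sides, so the template would render `rules:- apiGroups:` and fail to parse; dropping the trailing dash keeps the list item on its own line. For reference, with `serviceAccount.admin: true` the `-reader` ClusterRole renders to cluster-admin-equivalent rules (with `false` it keeps only the read-only `get`/`list` rules):

```yaml
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
```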
diff --git a/helm/kube-copilot/values.yaml b/helm/kube-copilot/values.yaml
index 3780f43..9ecdf54 100644
--- a/helm/kube-copilot/values.yaml
+++ b/helm/kube-copilot/values.yaml
@@ -3,57 +3,51 @@
 # Declare variables to be passed into your templates.
 
 replicaCount: 1
-
 image:
   repository: ghcr.io/feiskyer/kube-copilot
   pullPolicy: Always
-  tag: "latest"
-
+  tag: v0.1.19
 imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""
-
 openai:
   apiModel: "gpt-4"
   apiBase: ""
   apiKey: ""
-
 google:
   apiKey: ""
   cseId: ""
-
 serviceAccount:
   # Specifies whether a service account should be created
   create: true
+  # Specifies whether to create the admin role binding
+  admin: true
   # Annotations to add to the service account
   annotations: {}
   # The name of the service account to use.
   # If not set and create is true, a name is generated using the fullname template
   name: "kube-copilot"
-
 podAnnotations: {}
-
 podSecurityContext: {}
-  # fsGroup: 2000
+# fsGroup: 2000
 
 securityContext: {}
-  # capabilities:
-  #   drop:
-  #   - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
+# capabilities:
+#   drop:
+#   - ALL
+# readOnlyRootFilesystem: true
+# runAsNonRoot: true
+# runAsUser: 1000
 
 service:
   type: ClusterIP
   port: 80
-
 ingress:
   enabled: false
   className: ""
   annotations: {}
-    # kubernetes.io/ingress.class: nginx
-    # kubernetes.io/tls-acme: "true"
+  # kubernetes.io/ingress.class: nginx
+  # kubernetes.io/tls-acme: "true"
   hosts:
     - host: chart-example.local
       paths:
@@ -63,21 +57,18 @@ ingress:
 #  - secretName: chart-example-tls
 #    hosts:
 #      - chart-example.local
-
 resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
+# We usually recommend not to specify default resources and to leave this as a conscious
+# choice for the user. This also increases chances charts run on environments with little
+# resources, such as Minikube. If you do want to specify resources, uncomment the following
+# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+# limits:
+#   cpu: 100m
+#   memory: 128Mi
+# requests:
+#   cpu: 100m
+#   memory: 128Mi
 
 nodeSelector: {}
-
 tolerations: []
-
 affinity: {}
diff --git a/kube_copilot/labeler.py b/kube_copilot/labeler.py
new file mode 100644
index 0000000..59b6e38
--- /dev/null
+++ b/kube_copilot/labeler.py
@@ -0,0 +1,38 @@
+from langchain.callbacks.streamlit.streamlit_callback_handler import ToolRecord, LLMThoughtLabeler
+
+
+CHECKMARK_EMOJI = "✅"
+THINKING_EMOJI = ":thinking_face:"
+HISTORY_EMOJI = ":books:"
+EXCEPTION_EMOJI = "⚠️"
+
+
+class CustomLLMThoughtLabeler(LLMThoughtLabeler):
+    def get_tool_label(self, tool: ToolRecord, is_complete: bool) -> str:
+        """Return the label for an LLMThought that has an associated
+        tool.
+
+        Parameters
+        ----------
+        tool
+            The tool's ToolRecord
+
+        is_complete
+            True if the thought is complete; False if the thought
+            is still receiving input.
+
+        Returns
+        -------
+        The markdown label for the thought's container.
+
+        """
+        tool_input = tool.input_str.strip()
+        name = tool.name
+        emoji = CHECKMARK_EMOJI if is_complete else THINKING_EMOJI
+        if name == "_Exception":
+            emoji = EXCEPTION_EMOJI
+            name = "Parsing error"
+        # Unlike the base labeler, do not truncate or flatten the tool
+        # input, so the full command stays visible in the UI.
+        label = f"{emoji} **{name}:** {tool_input}"
+        return label
diff --git a/pyproject.toml b/pyproject.toml
index 1eea945..d0c77ff 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "kube-copilot"
-version = "0.1.18"
+version = "0.1.19"
 description = "Kubernetes Copilot"
 authors = ["Pengfei Ni "]
 readme = "README.md"
diff --git a/web/Home.py b/web/Home.py
index c971ae8..c4cc33b 100644
--- a/web/Home.py
+++ b/web/Home.py
@@ -10,6 +10,7 @@
 from kube_copilot.llm import init_openai
 from kube_copilot.prompts import get_prompt
 from kube_copilot.kubeconfig import setup_kubeconfig
+from kube_copilot.labeler import CustomLLMThoughtLabeler
 
 # setup logging
 logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
@@ -60,7 +61,8 @@
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
 
-    st_cb = StreamlitCallbackHandler(st.container())
+    st_cb = StreamlitCallbackHandler(
+        st.container(), thought_labeler=CustomLLMThoughtLabeler())
     chain = ReActLLM(model=model,
                      verbose=True,
                      enable_python=True,
diff --git a/web/pages/Analyze.py b/web/pages/Analyze.py
index 3e6d664..4a01ba5 100644
--- a/web/pages/Analyze.py
+++ b/web/pages/Analyze.py
@@ -9,6 +9,7 @@
 from kube_copilot.chains import ReActLLM
 from kube_copilot.llm import init_openai
 from kube_copilot.prompts import get_analyze_prompt
+from kube_copilot.labeler import CustomLLMThoughtLabeler
 
 logging.basicConfig(stream=sys.stdout, level=logging.CRITICAL)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
@@ -59,7 +60,7 @@
         st.stop()
 
     prompt = get_analyze_prompt(namespace, resource_type, resource_name)
-    st_cb = StreamlitCallbackHandler(st.container())
+    st_cb = StreamlitCallbackHandler(st.container(), thought_labeler=CustomLLMThoughtLabeler())
     chain = ReActLLM(model=model,
                      verbose=True,
                      enable_python=True,
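The pages in this diff all pass the new labeler to StreamlitCallbackHandler. A minimal sketch of what it produces (ToolRecord is the langchain named tuple imported in labeler.py; the "KubeProcess" tool name is illustrative):

```python
from langchain.callbacks.streamlit.streamlit_callback_handler import ToolRecord

from kube_copilot.labeler import CustomLLMThoughtLabeler

labeler = CustomLLMThoughtLabeler()

# A completed tool call keeps the full input in the label:
record = ToolRecord(name="KubeProcess", input_str="kubectl get pods -A\n")
print(labeler.get_tool_label(record, is_complete=True))
# -> ✅ **KubeProcess:** kubectl get pods -A

# Parsing errors are surfaced explicitly:
record = ToolRecord(name="_Exception", input_str="unparsable output")
print(labeler.get_tool_label(record, is_complete=True))
# -> ⚠️ **Parsing error:** unparsable output
```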
diff --git a/web/pages/Audit.py b/web/pages/Audit.py
index bed670f..51eaba0 100644
--- a/web/pages/Audit.py
+++ b/web/pages/Audit.py
@@ -9,6 +9,7 @@
 from kube_copilot.chains import ReActLLM
 from kube_copilot.llm import init_openai
 from kube_copilot.prompts import get_audit_prompt
+from kube_copilot.labeler import CustomLLMThoughtLabeler
 
 logging.basicConfig(stream=sys.stdout,
                     level=logging.CRITICAL)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
@@ -56,7 +57,7 @@
         st.stop()
 
     prompt = get_audit_prompt(namespace, pod)
-    st_cb = StreamlitCallbackHandler(st.container())
+    st_cb = StreamlitCallbackHandler(st.container(), thought_labeler=CustomLLMThoughtLabeler())
     chain = ReActLLM(model=model,
                      verbose=True,
                      enable_python=False,
diff --git a/web/pages/Diagnose.py b/web/pages/Diagnose.py
index 5e432e9..500d037 100644
--- a/web/pages/Diagnose.py
+++ b/web/pages/Diagnose.py
@@ -9,6 +9,7 @@
 from kube_copilot.chains import ReActLLM
 from kube_copilot.llm import init_openai
 from kube_copilot.prompts import get_diagnose_prompt
+from kube_copilot.labeler import CustomLLMThoughtLabeler
 
 logging.basicConfig(stream=sys.stdout, level=logging.CRITICAL)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
@@ -56,7 +57,7 @@
         st.stop()
 
     prompt = get_diagnose_prompt(namespace, pod)
-    st_cb = StreamlitCallbackHandler(st.container())
+    st_cb = StreamlitCallbackHandler(st.container(), thought_labeler=CustomLLMThoughtLabeler())
     # chain = PlanAndExecuteLLM(model=model, enable_python=True)
     chain = ReActLLM(model=model,
                      verbose=True,
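Home, Analyze, Audit, and Diagnose now repeat the same handler wiring. A hypothetical shared helper (not part of this PR; `run_with_labeler` and its parameters are illustrative, and the ReActLLM arguments mirror only those visible in the diff) could centralize it:

```python
import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler

from kube_copilot.chains import ReActLLM
from kube_copilot.labeler import CustomLLMThoughtLabeler


def run_with_labeler(model: str, prompt: str, enable_python: bool = True) -> str:
    """Run a ReAct chain, streaming its thoughts through the custom labeler."""
    st_cb = StreamlitCallbackHandler(
        st.container(), thought_labeler=CustomLLMThoughtLabeler())
    chain = ReActLLM(model=model,
                     verbose=True,
                     enable_python=enable_python)
    return chain.run(prompt, callbacks=[st_cb])
```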
diff --git a/web/pages/Generate.py b/web/pages/Generate.py
index a67406c..ebcdf3e 100644
--- a/web/pages/Generate.py
+++ b/web/pages/Generate.py
@@ -4,11 +4,14 @@
 import sys
 
 import streamlit as st
+import yaml
 from langchain.callbacks import StreamlitCallbackHandler
 
 from kube_copilot.chains import ReActLLM
 from kube_copilot.llm import init_openai
 from kube_copilot.prompts import get_generate_prompt
+from kube_copilot.shell import KubeProcess
+from kube_copilot.labeler import CustomLLMThoughtLabeler
 
 logging.basicConfig(stream=sys.stdout, level=logging.CRITICAL)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
@@ -34,7 +37,7 @@
 
 prompt = st.text_input("Prompt", key="prompt", placeholder="")
 
-if st.button("Generate"):
+if st.button("Generate", key="generate"):
     if not os.getenv("OPENAI_API_KEY", ""):
         if not openai_api_key:
             st.info("Please add your OpenAI API key to continue.")
@@ -51,17 +54,36 @@
         st.info("Please add your prompt to continue.")
         st.stop()
 
-    st_cb = StreamlitCallbackHandler(st.container())
+    st.session_state["response"] = ""
+    st.session_state["manifests"] = ""
+    st_cb = StreamlitCallbackHandler(st.container(), thought_labeler=CustomLLMThoughtLabeler())
     chain = ReActLLM(model=model,
                      verbose=True,
                      enable_python=True,
                      auto_approve=True)
     response = chain.run(get_generate_prompt(prompt), callbacks=[st_cb])
-    st.write(response)
-
-# # Apply the generated manifests in cluster
-# if click.confirm('Do you approve to apply the generated manifests to cluster?'):
-#     manifests = result.removeprefix(
-#         '```').removeprefix('yaml').removesuffix('```')
-#     print(KubeProcess(command="kubectl").run(
-#         'kubectl apply -f -', input=bytes(manifests, 'utf-8')))
+    st.session_state["response"] = response
+
+if st.session_state.get("response", "") != "":
+    response = st.session_state.get("response", "")
+    with st.container():
+        st.markdown(response)
+
+    manifests = response.removeprefix(
+        '```').removeprefix('yaml').removesuffix('```').strip()
+
+    try:
+        # yaml.safe_load_all returns a lazy generator, so consume it with
+        # list() to actually validate the documents.
+        list(yaml.safe_load_all(manifests))
+        st.session_state["manifests"] = manifests
+    except yaml.YAMLError:
+        st.error("The generated manifests are not valid YAML.")
+        st.stop()
+
+if st.session_state.get("manifests", "") != "":
+    if st.button("Apply to the cluster", key="apply_manifests"):
+        manifests = st.session_state["manifests"]
+        st.write("Applying the generated manifests...")
+        st.write(KubeProcess(command="kubectl").run(
+            'kubectl apply -f -', input=bytes(manifests, 'utf-8')))
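Two fixes folded into the Generate.py hunk above: the validation now consumes the generator with `list(...)` (the original `yamls = yaml.safe_load_all(manifests)` never raised, so the except branch was dead code), and the apply block reads `manifests` back from `st.session_state` instead of relying on a variable left over from the previous block across Streamlit reruns. A standalone sketch of the laziness gotcha:

```python
import yaml

bad = "key: [unclosed"

docs = yaml.safe_load_all(bad)   # no error yet: nothing has been parsed

try:
    list(docs)                   # parsing happens here
except yaml.YAMLError:
    print("invalid YAML")        # -> invalid YAML
```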