diff --git a/charts/agent-docker/CHANGELOG.md b/charts/agent-docker/CHANGELOG.md index d94abac..8806c72 100644 --- a/charts/agent-docker/CHANGELOG.md +++ b/charts/agent-docker/CHANGELOG.md @@ -9,6 +9,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [UNRELEASED] +## [v0.3.2] + +### Changed + +- Note about relation to agent-k8s chart. + ## [v0.3.1] ### Added diff --git a/charts/agent-docker/Chart.yaml b/charts/agent-docker/Chart.yaml index 99f7742..ccc09b8 100644 --- a/charts/agent-docker/Chart.yaml +++ b/charts/agent-docker/Chart.yaml @@ -5,6 +5,12 @@ description: | where runs are executed in [dind](https://hub.docker.com/_/docker) sidecar container. Run phases are isolated into docker containers. + > **Note** + > We suggest installing this chart only if you already rely on scalr-agent + > and are migrating your AgentPool from other installation methods: e.g. rpm / deb / docker. + > For new deployments we encourage you to try the new [`agent-k8s`](/charts/agent-k8s) chart, + > which has many advantages over the `agent-docker`. + Kuberentes deployment doesn't scale on multiple replicas. Consequently, the capacity of compute resources that can be managed by a single agent remains constrained by a single node. @@ -13,7 +19,7 @@ description: |  type: application -version: 0.3.1 +version: 0.3.2 appVersion: "0.1.33" home: https://github.com/Scalr/agent-helm/tree/master/charts/agent-docker maintainers: diff --git a/charts/agent-docker/README.md b/charts/agent-docker/README.md index 38b404c..ce1ba06 100644 --- a/charts/agent-docker/README.md +++ b/charts/agent-docker/README.md @@ -1,11 +1,17 @@ # agent-docker -   +   A Helm chart for the scalr-agent deployment on ths Kubernetes cluster, where runs are executed in [dind](https://hub.docker.com/_/docker) sidecar container. Run phases are isolated into docker containers. 
+> **Note** +> We suggest installing this chart only if you already rely on scalr-agent +> and are migrating your AgentPool from other installation methods: e.g. rpm / deb / docker. +> For new deployments we encourage you to try the new [`agent-k8s`](/charts/agent-k8s) chart, +> which has many advantages over the `agent-docker`. + Kuberentes deployment doesn't scale on multiple replicas. Consequently, the capacity of compute resources that can be managed by a single agent remains constrained by a single node. diff --git a/charts/agent-k8s/CHANGELOG.md b/charts/agent-k8s/CHANGELOG.md index b398441..f0ba561 100644 --- a/charts/agent-k8s/CHANGELOG.md +++ b/charts/agent-k8s/CHANGELOG.md @@ -9,18 +9,28 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [UNRELEASED] +## [v0.2.2] + +### Fixed + +- Improve the agent-k8s chart description. +- Note about relation to agent-docker chart. + ## [v0.2.1] ### Added + - Chart description - Kubernetes deployment diagram ## [v0.2.0] ### Added + - Automatically rollout deployment on the Sclar token change ## [v0.1.0] ### Added + - Initial release. diff --git a/charts/agent-k8s/Chart.yaml b/charts/agent-k8s/Chart.yaml index a190613..101cd91 100644 --- a/charts/agent-k8s/Chart.yaml +++ b/charts/agent-k8s/Chart.yaml @@ -5,6 +5,11 @@ description: | where runs are executed in Pods in the same cluster. Run phases are isolated in kubernetes containers with resource limits. + > **Note** + > This new deployment architecture is currently in preview. + > It has many advantages over the [`agent-docker`](/charts/agent-docker) chart and + > will eventually replace it. + Agent pool DaemomSet scales up/down with the cluster, registering and deregistering agents from the pool. When an Agent receives a job from Scalr, it schedules a Pod for execution. 
The Kubernetes workload scheduler assigns the Pod @@ -14,7 +19,7 @@ description: |  type: application -version: 0.2.1 +version: 0.2.2 appVersion: "0.1.33" home: https://github.com/Scalr/agent-helm/tree/master/charts/agent-k8s maintainers: diff --git a/charts/agent-k8s/README.md b/charts/agent-k8s/README.md index 8fd3d0a..0938adc 100644 --- a/charts/agent-k8s/README.md +++ b/charts/agent-k8s/README.md @@ -1,11 +1,16 @@ # agent-k8s -   +   A Helm chart for the scalr-agent deployment on the Kubernetes cluster, where runs are executed in Pods in the same cluster. Run phases are isolated in kubernetes containers with resource limits. +> **Note** +> This new deployment architecture is currently in preview. +> It has many advantages over the [`agent-docker`](/charts/agent-docker) chart and +> will eventually replace it. + Agent pool DaemomSet scales up/down with the cluster, registering and deregistering agents from the pool. When an Agent receives a job from Scalr, it schedules a Pod for execution. The Kubernetes workload scheduler assigns the Pod @@ -27,11 +32,11 @@ linearly based on the load. | Key | Type | Default | Description | |-----|------|---------|-------------| -| agent.container_task_cpu_limit | float | `8` | If your container only needs ¼ of a core, you would put a value of 0.25 cores. | -| agent.container_task_cpu_request | float | `1` | put the value 2. If your container only needs ¼ of a core, you would put a value of 0.25 cores. | +| agent.container_task_cpu_limit | float | `8` | CPU resource limit defined in cores. If your container needs two full cores to run, you would put the value 2. If your container only needs ¼ of a core, you would put a value of 0.25 cores. | +| agent.container_task_cpu_request | float | `1` | CPU resource request defined in cores. If your container needs two full cores to run, you would put the value 2. If your container only needs ¼ of a core, you would put a value of 0.25 cores. 
| | agent.container_task_mem_limit | int | `16384` | Memory resource limit defined in megabytes. | | agent.container_task_mem_request | int | `1024` | Memory resource request defined in megabytes. | -| agent.container_task_scheduling_timeout | int | `120` | does not allocate resources for the container in that timeout, the task will be switched to the errored status. | +| agent.container_task_scheduling_timeout | int | `120` | The container task's (e.g., Kubernetes Pod) scheduling timeout in seconds. The task will be waiting for the scheduling in the queued status; if the cluster does not allocate resources for the container in that timeout, the task will be switched to the errored status. | | agent.data_home | string | `"/home/kubernetes/flexvolume/agent-k8s"` | The agent working directory on the cluster host node. | | agent.debug | bool | `false` | Enable debug logs | | agent.disconnect_on_stop | bool | `true` | Determines if the agent should automatically disconnect from the Scalr agent pool when the service is stopping. | @@ -44,8 +49,8 @@ linearly based on the load. | agent.worker_drain_timeout | int | `3600` | The timeout for draining worker tasks in seconds. After this timeout, tasks will be terminated via the SIGTERM signal. | | agent.worker_on_stop_action | string | `"drain"` | Defines the SIGTERM/SIGHUP/SIGINT signal handler's shutdown behavior. Options: "drain" or "grace-shutdown" or "force-shutdown". | | fullnameOverride | string | `""` | | -| image.pullPolicy | string | `"Always"` | | -| image.repository | string | `"scalr/agent"` | | +| image.pullPolicy | string | `"Always"` | The pullPolicy for a container and the tag of the image. | +| image.repository | string | `"scalr/agent"` | Docker repository for the Scalr Agent image. | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| | imagePullSecrets | list | `[]` | | | nameOverride | string | `""` | | diff --git a/charts/agent-k8s/values.yaml b/charts/agent-k8s/values.yaml index 25b7e2f..e232739 100644 --- a/charts/agent-k8s/values.yaml +++ b/charts/agent-k8s/values.yaml @@ -1,9 +1,9 @@ # Default values for agent-k8s. image: - # Docker repository for the Scalr Agent image. + # -- Docker repository for the Scalr Agent image. repository: scalr/agent - # The pullPolicy for a container and the tag of the image. + # -- The pullPolicy for a container and the tag of the image. pullPolicy: Always # -- Overrides the image tag whose default is the chart appVersion. tag: "" @@ -28,14 +28,14 @@ agent: # -- The agent working directory on the cluster host node. data_home: "/home/kubernetes/flexvolume/agent-k8s" # -- The container task's (e.g., Kubernetes Pod) scheduling timeout in seconds. - # -- The task will be waiting for the scheduling in the queued status; if the cluster - # -- does not allocate resources for the container in that timeout, the task will be switched to the errored status. + # The task will be waiting for the scheduling in the queued status; if the cluster + # does not allocate resources for the container in that timeout, the task will be switched to the errored status. container_task_scheduling_timeout: 120 # -- CPU resource request defined in cores. If your container needs two full cores to run, you would - # -- put the value 2. If your container only needs ¼ of a core, you would put a value of 0.25 cores. + # put the value 2. If your container only needs ¼ of a core, you would put a value of 0.25 cores. container_task_cpu_request: 1.0 # -- CPU resource limit defined in cores. If your container needs two full cores to run, you would put the value 2. - # -- If your container only needs ¼ of a core, you would put a value of 0.25 cores. + # If your container only needs ¼ of a core, you would put a value of 0.25 cores. 
container_task_cpu_limit: 8.0 # -- Memory resource request defined in megabytes. container_task_mem_request: 1024