global:
  # -- allows overriding the release's namespace
  # @default -- `.Release.Namespace`
  namespace:
  # -- overrides the generated full name for generated resources
  # @default -- release-name + chart-name truncated to 63 chars
  fullnameOverride:
  # -- overrides the generated name for generated resources
  # @default -- `.Chart.Name`
  nameOverride:
deployment:
  # -- additional labels to add to the deployment's metadata
  extraLabels: {}
  # -- additional annotations to add to the deployment's metadata
  extraAnnotations: {}
  spec:
    # -- additional labels to add to the pods' metadata
    extraLabels: {}
    # -- additional annotations to add to the pods' metadata
    extraAnnotations: {}
    # -- additional environment variables to add to the pod
    extraEnv: []
    # -- additional environment refs to add to the pod
    extraEnvFrom: []
    # -- additional volumes for the container; configures the pod's `volumeMounts` and `volumes` sections:
    # <pre>- name: my-volume<br>  mountPath: /my-path<br>  emptyDir: {}</pre>
    # `name` and `mountPath` are used in both `volumeMounts` and `volumes`, `readOnly` applies only to
    # `volumeMounts`, and any other key is added to `volumes` only (see the commented sketch below)
    extraVolumes: []
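    # The commented sketch below spells out the shape above as real YAML,
    # assuming an emptyDir suffices (the name and mount path are made-up
    # placeholders); `readOnly` ends up in `volumeMounts`, `emptyDir` in `volumes`:
    # extraVolumes:
    #   - name: scratch
    #     mountPath: /scratch
    #     readOnly: false
    #     emptyDir: {}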
  strategy:
    # -- Update strategy for the agent's pods:
    # `Recreate` guarantees that no two snapshots get taken at the same time;
    # `RollingUpdate` ensures that there's always one instance of the agent running
    type: "Recreate"
  image:
    # -- Image that is deployed (change e.g. to use a private registry proxy)
    repo: ghcr.io/argelbargel/vault-raft-snapshot-agent
    # -- (string) the image's tag
    # @default -- `.Chart.AppVersion`
    tag:
    # -- New releases of vault-raft-snapshot-agent always change the
    # `.Chart.AppVersion` of this chart, so this only needs to be changed
    # if you use a repository other than the default
    pullPolicy: IfNotPresent
  # -- resource limits and requests for the deployment
  resources:
    # -- resource limits of the deployment
    limits: {}
    # -- resource requests by the deployment
    requests: {}
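  # A hedged example of explicit resources (the numbers are illustrative
  # placeholders, not tuned recommendations for the agent):
  # resources:
  #   limits:
  #     memory: 128Mi
  #   requests:
  #     cpu: 100m
  #     memory: 128Mi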
  # -- specify an optional init-container. Only the properties `name`, `image`, `command` and `env` are used.
  # `name` and `image` are optional; by default alpine:3.19.1, the agent's base image, is used as the image.
  # The init-container has access to all extraVolumes.
  initContainer: {}
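  # A minimal sketch of an init-container, assuming you only need to run a
  # shell command before the agent starts (the name and command are made-up
  # placeholders; `image` is omitted so the default base image is used):
  # initContainer:
  #   name: prepare-snapshot-dir
  #   command: ["mkdir", "-p", "/vault/snapshots"]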
  # -- (int) see the [kubernetes docs](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#clean-up-policy).
  # You might want to set this to a small value to avoid cluttering up the
  # UI of a continuous-delivery tool like Argo CD
  revisionHistoryLimit:
serviceAccount:
  # -- Specifies whether a service account should be created
  create: true
  # -- name of the service account to use
  # @default -- chart name according to the name settings; `.fullnameOverride` and `.nameOverride` are taken into account
  name:
  # -- Annotations to add to the service account
  annotations: {}
# -- Defines the contents of the configuration file for vault-raft-snapshot-agent.
# Except for `local_storage`, the keys and values are the same as in the agent's
# [configuration file](https://github.com/Argelbargel/vault-raft-snapshot-agent)
config:
  vault:
    nodes:
      # -- URLs of the vault nodes. It is recommended to use a single URL that always points to the *leader* of your vault cluster, e.g. `https?://vault-active.<vault-namespace>.svc.cluster.local:<vault-server service-port>`
      urls:
        - http://127.0.0.1:8200
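      # For illustration, assuming vault is installed in the namespace `vault`
      # and its active service listens on port 8200 (both are assumptions about
      # your setup), the recommended single-URL configuration would be:
      # urls:
      #   - http://vault-active.vault.svc.cluster.local:8200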
      autoDetectLeader: false
    auth:
      kubernetes:
        role: vault-raft-snapshot-agent
  snapshots:
    frequency: 1h
    retain: 72
    storages:
      local:
        # -- Enables/disables the local storage of snapshots.
        # If disabled, the corresponding volume and volume-mounts will not be created
        enabled: true
        # @ignored
        path: /vault/snapshots
        # -- Defines the kind of volume used to store the snapshots locally.
        # If you specify `persistentVolumeClaim`, the chart can generate the
        # PVC for you: just specify the claim as you would [normally do](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#claims-as-volumes),
        # add the property `create: true`, and put the relevant properties of your PersistentVolumeClaimSpec
        # under `persistentVolumeClaim` (see the commented sketch below)
        volume:
          emptyDir: {}
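        # A hedged sketch of the generated-PVC alternative described above
        # (claim name, storage class and size are made-up placeholders for
        # your environment; `create: true` asks the chart to generate the PVC):
        # volume:
        #   persistentVolumeClaim:
        #     claimName: vault-snapshots
        #     create: true
        #     storageClassName: standard
        #     accessModes: ["ReadWriteOnce"]
        #     resources:
        #       requests:
        #         storage: 10Gi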