-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpod.yaml
86 lines (81 loc) · 3.29 KB
/
pod.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
# Simplenetes ingress pod specification, consumed by the `sns` tooling and run under podman.
api: 1.0.0-beta2
runtime: podman
# Pod release version; expanded below as ${podVersion} to tag the fetcher image.
podVersion: 0.2.0
# Volumes shared between the containers below.
# NOTE: the #iftrue/#ifntrue/#endif lines are conditional-inclusion directives on
# ${devmode} consumed by the pod tooling — treat them as code, not free-text comments.
volumes:
  # The `conf` name is hardcoded into the ingress generator and cannot be changed.
  # The generated haproxy.cfg file is written to it inside the cluster project `cluster_dir/_config/ingress/conf` directory.
  #ifntrue ${devmode}
  - name: conf
    type: config
  #endif
  #iftrue ${devmode}
  # In devmode we have to use an already existing haproxy.cfg.
  - name: _devconf
    type: config
  #endif
  # Certs provided by the auto issuer pod (letsencrypt pod) are fetched to this volume.
  # If these auto certs are not used, this volume must still be present otherwise the haproxy.cfg will fail.
  # NOTE(review): ramdisk contents are memory-backed; presumably 28M is sized for the
  # fetched cert bundle — confirm against the fetcher's output size.
  - name: autocerts
    type: ramdisk
    size: 28M
  # User provided certs, such as Extended Validation certificates which include human validation.
  # Certs are to be placed here (`cluster_dir/_config/ingress/usercerts`) manually by the user and synced.
  - name: usercerts
    type: config
    # We can make this into an encrypted secret (when that feature is implemented in sns).
    encrypted: false
containers:
  #iftrue ${useFetcher}
  # Periodically pulls certificates issued by the letsencrypt pod into the
  # autocerts ramdisk, then signals the haproxy container to reload.
  - name: fetcher
    image: ghcr.io/simplenetes-io/ingress-fetcher:${podVersion}
    # Restart this container once each 12 hours to fetch any updated certificates.
    restart: on-interval:43200
    env:
      # This is the cluster port the Letsencrypt pod is listening to.
      # Set it as an env variable which our "fetcher" program will use.
      # Quoted: environment-variable values are strings, and an unquoted 64000
      # would be typed as a YAML integer by strict parsers/schemas.
      - name: autoCertClusterPort
        value: "64000"
    mounts:
      # This is the RAMDISK defined above; fetched certificates are written here.
      # The destination is hardcoded into the fetcher program.
      - volume: autocerts
        dest: /mnt/autocerts
    startupProbe:
      # Allow 20 seconds for the container to get started.
      timeout: 20
      # Our probe is simply that we wait for the container to exit with code 0.
      exit: true
    # Signal the haproxy container to reload when we exit successfully.
    # This will make the haproxy container reload once every 12 h, regardless of whether there
    # are new certs or not. But there's no harm in doing this, so instead of complicating the
    # logic of not reloading when there are no new certs, we KISS it and only provide this simpler path.
    # Since we have a startup probe, the pod creation process will fail if this container does not exit properly.
    signal:
      - container: haproxy
  #endif
  # The ingress proxy itself.
  # NOTE(review): the haproxy 2.1 line is end-of-life — consider moving to a maintained release.
  - name: haproxy
    image: haproxy:2.1.3-alpine
    restart: always
    # Using the host network means that haproxy will bind directly to the host interface,
    # and no port mappings need to be done.
    network: host
    # Reload hook: sending USR2 to haproxy running in master-worker mode (-W) makes the
    # master re-exec with the current config and certs, without dropping live connections.
    signal:
      - sig: USR2
        command:
          - haproxy
          - -f
          - /mnt/conf/haproxy.cfg
          - -W
    mounts:
      # The destinations for mounting certs are hardcoded into the ingress haproxy.cfg generator and must not be changed.
      - volume: usercerts
        dest: /mnt/usercerts
      - volume: autocerts
        dest: /mnt/autocerts
      #iftrue ${devmode}
      # In devmode the pre-existing config volume is mounted in place of the generated one.
      - volume: _devconf
        dest: /mnt/conf
      #endif
      #ifntrue ${devmode}
      - volume: conf
        dest: /mnt/conf
      #endif