-
Notifications
You must be signed in to change notification settings - Fork 20
/
Copy pathkafka-persistent.yaml
197 lines (196 loc) · 5.58 KB
/
kafka-persistent.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
---
# OpenShift template: a persistent Kafka cluster (headless Service + StatefulSet).
# Zookeeper is external (KAFKA_ZK_LOCAL=false); its address comes from SERVER_ZOOKEEPER_CONNECT.
kind: Template
apiVersion: v1
metadata:
  name: kafka-persistent
  annotations:
    openshift.io/display-name: Kafka (Persistent)
    description: Create a Kafka cluster, with persistent storage.
    iconClass: icon-database
    tags: messaging,kafka
labels:
  template: kafka-persistent
  component: kafka
parameters:
- name: NAME
  description: Name.
  required: true
  value: kafka-persistent
- name: KAFKA_VERSION
  description: Kafka Version (Scala and kafka version).
  required: true
  value: "2.13-2.5.0"
- name: SOURCE_IMAGE
  description: Container image source.
  value: kafka
  required: true
- name: REPLICAS
  description: Number of replicas.
  required: true
  value: "3"
- name: KAFKA_HEAP_OPTS
  description: Kafka JVM Heap options. Consider value of params RESOURCE_MEMORY_REQ and RESOURCE_MEMORY_LIMIT.
  required: true
  value: "-Xmx256M -Xms256M"
- name: SERVER_NUM_PARTITIONS
  description: >
    The default number of log partitions per topic.
    More partitions allow greater
    parallelism for consumption, but this will also result in more files across
    the brokers.
  required: true
  value: "1"
- name: SERVER_DELETE_TOPIC_ENABLE
  description: >
    Topic deletion enabled.
    Switch to enable topic deletion or not, default value is 'true'
  value: "true"
- name: SERVER_LOG_RETENTION_HOURS
  description: >
    Log retention hours.
    The minimum age of a log file to be eligible for deletion.
  value: "2147483647"
- name: SERVER_ZOOKEEPER_CONNECT
  description: >
    Zookeeper conection string, a list as URL with nodes separated by ','.
  value: "zk.myproject.svc:2181"
  required: true
- name: SERVER_ZOOKEEPER_CONNECT_TIMEOUT
  description: >
    The max time that the client waits to establish a connection to zookeeper (ms).
  value: "6000"
  required: true
- name: VOLUME_KAFKA_CAPACITY
  description: Kafka logs capacity.
  required: true
  value: "1Gi"
- name: RESOURCE_MEMORY_REQ
  description: The memory resource request.
  value: "512M"
- name: RESOURCE_MEMORY_LIMIT
  description: The limits for memory resource.
  value: "512M"
- name: RESOURCE_CPU_REQ
  description: The CPU resource request.
  value: "300m"
- name: RESOURCE_CPU_LIMIT
  description: The limits for CPU resource.
  value: "300m"
objects:
# Headless service (clusterIP: None) gives each StatefulSet pod a stable DNS name.
- apiVersion: v1
  kind: Service
  metadata:
    name: ${NAME}
    labels:
      app: ${NAME}
      component: kafka
    annotations:
      # Publish endpoints before pods pass readiness, so brokers can find each
      # other during bootstrap.
      # NOTE(review): this alpha annotation is deprecated in favour of
      # spec.publishNotReadyAddresses — confirm the target cluster version
      # before switching.
      service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
  spec:
    ports:
    - port: 9092
      name: server
    clusterIP: None
    selector:
      app: ${NAME}
      component: kafka
- apiVersion: apps/v1
  kind: StatefulSet
  metadata:
    name: ${NAME}
    labels:
      app: ${NAME}
      component: kafka
  spec:
    # Start/replace all brokers at once instead of one-by-one.
    podManagementPolicy: "Parallel"
    serviceName: ${NAME}
    selector:
      matchLabels:
        app: ${NAME}
        component: kafka
    # ${{PARAM}} (not ${PARAM}) performs OpenShift's non-string substitution:
    # apps/v1 requires spec.replicas to be an integer, and plain ${REPLICAS}
    # would inject the string "3".
    replicas: ${{REPLICAS}}
    template:
      metadata:
        labels:
          app: ${NAME}
          component: kafka
        # annotations:
        #   # Use this annotation if you want allocate each pod on different node
        #   # Note the number of nodes must be upper than REPLICAS parameter.
        #   scheduler.alpha.kubernetes.io/affinity: >
        #     {
        #       "podAntiAffinity": {
        #         "requiredDuringSchedulingIgnoredDuringExecution": [{
        #           "labelSelector": {
        #             "matchExpressions": [{
        #               "key": "app",
        #               "operator": "In",
        #               "values": ["${NAME}"]
        #             }]
        #           },
        #           "topologyKey": "kubernetes.io/hostname"
        #         }]
        #       }
        #     }
      spec:
        securityContext:
          runAsUser: 1001
          fsGroup: 1001
        containers:
        - name: ${NAME}
          imagePullPolicy: IfNotPresent
          image: ${SOURCE_IMAGE}:${KAFKA_VERSION}
          resources:
            requests:
              memory: ${RESOURCE_MEMORY_REQ}
              cpu: ${RESOURCE_CPU_REQ}
            limits:
              memory: ${RESOURCE_MEMORY_LIMIT}
              cpu: ${RESOURCE_CPU_LIMIT}
          ports:
          - containerPort: 9092
            name: server
          # Env values are always strings in k8s; keep templated values quoted
          # so substitution can never be re-typed by a YAML parser.
          env:
          - name: KAFKA_REPLICAS
            value: "${REPLICAS}"
          # Use the external Zookeeper, not an embedded one.
          - name: KAFKA_ZK_LOCAL
            value: "false"
          - name: KAFKA_HEAP_OPTS
            value: "${KAFKA_HEAP_OPTS}"
          # SERVER_<property> vars map to server.properties entries
          # (presumably translated by the image's entrypoint — TODO confirm).
          - name: SERVER_num_partitions
            value: "${SERVER_NUM_PARTITIONS}"
          - name: SERVER_delete_topic_enable
            value: "${SERVER_DELETE_TOPIC_ENABLE}"
          - name: SERVER_log_retention_hours
            value: "${SERVER_LOG_RETENTION_HOURS}"
          - name: SERVER_zookeeper_connect
            value: "${SERVER_ZOOKEEPER_CONNECT}"
          - name: SERVER_log_dirs
            value: "/opt/kafka/data/logs"
          - name: SERVER_zookeeper_connection_timeout_ms
            value: "${SERVER_ZOOKEEPER_CONNECT_TIMEOUT}"
          readinessProbe:
            exec:
              command:
              - kafka_server_status.sh
            initialDelaySeconds: 30
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
              - kafka_server_status.sh
            initialDelaySeconds: 30
            timeoutSeconds: 5
          volumeMounts:
          - name: kafka-data
            mountPath: /opt/kafka/data
    volumeClaimTemplates:
    - metadata:
        name: kafka-data
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: ${VOLUME_KAFKA_CAPACITY}
        # A PVC selector is a LabelSelector: bare label keys are invalid here,
        # they must sit under matchLabels. Binds only to pre-provisioned PVs
        # carrying these labels.
        selector:
          matchLabels:
            component: kafka
            contents: data