example.yaml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: gpu-test-workloads
    pod-security.kubernetes.io/enforce: privileged
  name: gpu-test-workloads
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cuda-sample-vector-add
  namespace: gpu-test-workloads
  labels:
    app: cuda-sample-vector-add
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cuda-sample-vector-add
  template:
    metadata:
      labels:
        app: cuda-sample-vector-add
    spec:
      containers:
        - name: cuda-sample-vector-add
          image: nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda11.7.1-ubuntu20.04
          command:
            - /bin/bash
            - '-c'
            - '--'
          args:
            - while true; do /cuda-samples/vectorAdd; done
          resources:
            limits:
              nvidia.com/gpu: 1       # declare how many physical GPUs the pod needs
              nvidia.com/gpumem: 3000 # each vGPU contains 3000M of device memory (optional, integer)
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      hostPID: true
      securityContext: {}
      schedulerName: default-scheduler
      tolerations:
        - key: nvidia.com/gpu
          operator: Exists
          effect: NoSchedule
      priorityClassName: system-cluster-critical
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
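
A minimal usage sketch, not part of the manifest itself: it assumes kubectl access to a cluster where a GPU device plugin and scheduler that recognize the nvidia.com/gpu and nvidia.com/gpumem extended resources are already installed, and that the manifest above is saved as example.yaml.

# apply the manifest and wait for the deployment to roll out
kubectl apply -f example.yaml
kubectl -n gpu-test-workloads rollout status deploy/cuda-sample-vector-add

# the container runs vectorAdd in a loop; "Test PASSED" in the logs indicates the GPU is usable
kubectl -n gpu-test-workloads logs deploy/cuda-sample-vector-add --tail=20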