Commit 2d00766

Add support for uperf driver (#118)
Similar to iperf3, the user can run uperf alongside netperf with the "--uperf" option. benchmark-wrapper was used as a reference for 1) parsing the user options and creating the uperf profile file (the input to the uperf client command) and 2) parsing the uperf output. The uperf driver supports only the TCP_STREAM, UDP_STREAM, TCP_RR, and UDP_RR tests. For each test in full-run.yaml, the uperf driver creates a uperf profile file inside the client pod and uses it to run the test. Parallelism is implemented with uperf's nprocs option. The uperf server cannot be chained with "&&" in the same container of the server pod (i.e. "netserver && iperf3 -s -p 22865 && uperf -s -v -P 30000 && sleep 10000000"), so netperf, iperf3, and uperf now run in separate containers inside the server pod. The driver is also shown in the RR P99 latency results.
1 parent e5300db commit 2d00766
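
For context, a uperf profile is a small XML document describing the connections, transactions, and flowops the client should execute. The sketch below illustrates how such a profile might be templated for a TCP_STREAM test; it is a minimal illustration only, not the code from pkg/uperf (which is not shown in this diff), and the buildStreamProfile helper, profile layout, and flowop options are assumptions based on uperf's documented profile format.

```go
package main

import "fmt"

// buildStreamProfile sketches how a TCP_STREAM uperf profile could be rendered
// from the test parameters (server IP, message size, parallelism via nprocs,
// duration). Hypothetical helper; not the actual pkg/uperf implementation.
func buildStreamProfile(serverIP string, msgSize, nprocs, duration int) string {
	return fmt.Sprintf(`<?xml version="1.0"?>
<profile name="tcp-stream-%d">
  <group nprocs="%d">
    <transaction iterations="1">
      <flowop type="connect" options="remotehost=%s protocol=tcp"/>
    </transaction>
    <transaction duration="%ds">
      <flowop type="write" options="count=16 size=%d"/>
    </transaction>
    <transaction iterations="1">
      <flowop type="disconnect"/>
    </transaction>
  </group>
</profile>
`, msgSize, nprocs, serverIP, duration, msgSize)
}

func main() {
	// Example: 1024-byte messages, one process, 10-second run against the uperf service IP.
	fmt.Println(buildStreamProfile("10.0.0.1", 1024, 1, 10))
}
```

The client pod writes a file like this and points the uperf client at it (e.g. uperf -v -m <profile>), while the server side only needs a listening slave, which is exactly the "uperf -s -v -P 30000" command that gets its own container in the server pod changes below.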

File tree: 7 files changed (+327, -21 lines)

README.md

Lines changed: 7 additions & 0 deletions
@@ -62,6 +62,7 @@ Flags:
       --debug              Enable debug log
   -h, --help               help for k8s-netperf
       --iperf              Use iperf3 as load driver (along with netperf)
+      --uperf              Use uperf as load driver (along with netperf)
       --json               Instead of human-readable output, return JSON to stdout
       --local              Run network performance tests with Server-Pods/Client-Pods on the same Node
       --metrics            Show all system metrics retrieved from prom
@@ -124,16 +125,22 @@ $ ./k8s-netperf --tcp-tolerance 1
 +-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+
 | 📊 Stream Results | netperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2661.006667 (Mb/s) |
 | 📊 Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2483.078229 (Mb/s) |
+| 📊 Stream Results | uperf | TCP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 2581.705097 (Mb/s) |
 | 📊 Stream Results | netperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2702.230000 (Mb/s) |
 | 📊 Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2523.434069 (Mb/s) |
+| 📊 Stream Results | uperf | TCP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 2567.665412 (Mb/s) |
 | 📊 Stream Results | netperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2697.276667 (Mb/s) |
 | 📊 Stream Results | iperf3 | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2542.793728 (Mb/s) |
+| 📊 Stream Results | uperf | TCP_STREAM | 1 | true | false | 8192 | false | 10 | 3 | 2571.881579 (Mb/s) |
 | 📊 Stream Results | netperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2707.076667 (Mb/s) |
 | 📊 Stream Results | iperf3 | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2604.067072 (Mb/s) |
+| 📊 Stream Results | uperf | TCP_STREAM | 1 | false | false | 8192 | false | 10 | 3 | 2687.276667 (Mb/s) |
 | 📊 Stream Results | netperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1143.926667 (Mb/s) |
 | 📊 Stream Results | iperf3 | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1202.428288 (Mb/s) |
+| 📊 Stream Results | uperf | UDP_STREAM | 1 | true | false | 1024 | false | 10 | 3 | 1242.059988 (Mb/s) |
 | 📊 Stream Results | netperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1145.066667 (Mb/s) |
 | 📊 Stream Results | iperf3 | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1239.580672 (Mb/s) |
+| 📊 Stream Results | uperf | UDP_STREAM | 1 | false | false | 1024 | false | 10 | 3 | 1261.840000 (Mb/s) |
 +-------------------+---------+------------+-------------+--------------+---------+--------------+-----------+----------+---------+--------------------+
 +---------------+---------+----------+-------------+--------------+---------+--------------+-----------+----------+---------+---------------------+
 | RESULT TYPE | DRIVER | SCENARIO | PARALLELISM | HOST NETWORK | SERVICE | MESSAGE SIZE | SAME NODE | DURATION | SAMPLES | AVG VALUE |

cmd/k8s-netperf/k8s-netperf.go

Lines changed: 40 additions & 5 deletions
@@ -20,6 +20,7 @@ import (
 	"github.com/cloud-bulldozer/k8s-netperf/pkg/netperf"
 	result "github.com/cloud-bulldozer/k8s-netperf/pkg/results"
 	"github.com/cloud-bulldozer/k8s-netperf/pkg/sample"
+	uperf_driver "github.com/cloud-bulldozer/k8s-netperf/pkg/uperf"
 	"github.com/google/uuid"
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,6 +37,7 @@ var (
 	nl       bool
 	clean    bool
 	iperf3   bool
+	uperf    bool
 	acrossAZ bool
 	full     bool
 	debug    bool
@@ -158,24 +160,36 @@ var rootCmd = &cobra.Command{
 			if s.HostNetwork {
 				// No need to run hostNetwork through Service.
 				if !nc.Service {
-					npr := executeWorkload(nc, s, true, false)
+					npr := executeWorkload(nc, s, true, false, false)
 					sr.Results = append(sr.Results, npr)
 					if iperf3 {
-						ipr := executeWorkload(nc, s, true, true)
+						ipr := executeWorkload(nc, s, true, true, false)
 						if len(ipr.Profile) > 1 {
 							sr.Results = append(sr.Results, ipr)
 						}
 					}
+					if uperf {
+						upr := executeWorkload(nc, s, true, false, true)
+						if len(upr.Profile) > 1 {
+							sr.Results = append(sr.Results, upr)
+						}
+					}
 				}
 			}
-			npr := executeWorkload(nc, s, false, false)
+			npr := executeWorkload(nc, s, false, false, false)
 			sr.Results = append(sr.Results, npr)
 			if iperf3 {
-				ipr := executeWorkload(nc, s, false, true)
+				ipr := executeWorkload(nc, s, false, true, false)
 				if len(ipr.Profile) > 1 {
 					sr.Results = append(sr.Results, ipr)
 				}
 			}
+			if uperf {
+				upr := executeWorkload(nc, s, false, false, true)
+				if len(upr.Profile) > 1 {
+					sr.Results = append(sr.Results, upr)
+				}
+			}
 		}
 
 		var fTime time.Time
@@ -323,7 +337,7 @@ func cleanup(client *kubernetes.Clientset) {
 
 }
 
-func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, iperf3 bool) result.Data {
+func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, iperf3 bool, uperf bool) result.Data {
 	serverIP := ""
 	service := false
 	sameNode := true
@@ -332,6 +346,8 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
 		service = true
 		if iperf3 {
 			serverIP = s.IperfService.Spec.ClusterIP
+		} else if uperf {
+			serverIP = s.UperfService.Spec.ClusterIP
 		} else {
 			serverIP = s.NetperfService.Spec.ClusterIP
 		}
@@ -356,6 +372,12 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
 			return npr
 		}
 	}
+	if uperf {
+		// uperf doesn't support all tests cases
+		if !uperf_driver.TestSupported(nc.Profile) {
+			return npr
+		}
+	}
 
 	npr.Config = nc
 	npr.Metric = nc.Metric
@@ -383,6 +405,18 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
 			log.Error(err)
 			os.Exit(1)
 		}
+	} else if uperf {
+		npr.Driver = "uperf"
+		r, err := uperf_driver.Run(s.ClientSet, s.RestConfig, nc, Client, serverIP)
+		if err != nil {
+			log.Error(err)
+			os.Exit(1)
+		}
+		nr, err = uperf_driver.ParseResults(&r)
+		if err != nil {
+			log.Error(err)
+			os.Exit(1)
+		}
 	} else {
 		npr.Driver = "netperf"
 		r, err := netperf.Run(s.ClientSet, s.RestConfig, nc, Client, serverIP)
@@ -435,6 +469,7 @@ func executeWorkload(nc config.Config, s config.PerfScenarios, hostNet bool, ipe
 func main() {
 	rootCmd.Flags().StringVar(&cfgfile, "config", "netperf.yml", "K8s netperf Configuration File")
 	rootCmd.Flags().BoolVar(&iperf3, "iperf", false, "Use iperf3 as load driver (along with netperf)")
+	rootCmd.Flags().BoolVar(&uperf, "uperf", false, "Use uperf as load driver (along with netperf)")
 	rootCmd.Flags().BoolVar(&clean, "clean", true, "Clean-up resources created by k8s-netperf")
 	rootCmd.Flags().BoolVar(&json, "json", false, "Instead of human-readable output, return JSON to stdout")
 	rootCmd.Flags().BoolVar(&nl, "local", false, "Run network performance tests with Server-Pods/Client-Pods on the same Node")

containers/Containerfile

Lines changed: 3 additions & 0 deletions
@@ -3,7 +3,10 @@ ARG RHEL_VERSION
 FROM registry.access.redhat.com/${RHEL_VERSION}:latest
 
 COPY appstream.repo /etc/yum.repos.d/centos8-appstream.repo
+
 COPY netperf.diff /tmp/netperf.diff
+RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && dnf clean all
+RUN dnf install -y uperf && dnf clean all
 
 RUN dnf install -y --nodocs make automake --enablerepo=centos9 --allowerasing && \
     dnf install -y --nodocs gcc git bc lksctp-tools-devel texinfo --enablerepo=*

pkg/config/config.go

Lines changed: 1 addition & 0 deletions
@@ -41,6 +41,7 @@ type PerfScenarios struct {
 	ServerHost     apiv1.PodList
 	NetperfService *apiv1.Service
 	IperfService   *apiv1.Service
+	UperfService   *apiv1.Service
 	RestConfig     rest.Config
 	ClientSet      *kubernetes.Clientset
 }

pkg/k8s/kubernetes.go

Lines changed: 48 additions & 14 deletions
@@ -16,14 +16,15 @@ import (
 )
 
 // DeploymentParams describes the deployment
+// Server pod can run multiple containers, each command in Commands will represent a container command
 type DeploymentParams struct {
 	HostNetwork     bool
 	Name            string
 	Namespace       string
 	Replicas        int32
 	Image           string
 	Labels          map[string]string
-	Command         []string
+	Commands        [][]string
 	PodAffinity     apiv1.PodAffinity
 	PodAntiAffinity apiv1.PodAntiAffinity
 	NodeAffinity    apiv1.NodeAffinity
@@ -47,12 +48,18 @@ const NetperfServerCtlPort = 12865
 // IperfServerCtlPort control port for the service
 const IperfServerCtlPort = 22865
 
+// UperferverCtlPort control port for the service
+const UperfServerCtlPort = 30000
+
 // NetperfServerDataPort data port for the service
 const NetperfServerDataPort = 42424
 
 // IperfServerDataPort data port for the service
 const IperfServerDataPort = 43433
 
+// UperfServerDataPort data port for the service
+const UperfServerDataPort = 30001
+
 // Labels we will apply to k8s assets.
 const serverRole = "server"
 const clientRole = "client-local"
@@ -136,7 +143,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 		Replicas:  1,
 		Image:     "quay.io/cloud-bulldozer/netperf:latest",
 		Labels:    map[string]string{"role": clientRole},
-		Command:   []string{"/bin/bash", "-c", "sleep 10000000"},
+		Commands:  [][]string{{"/bin/bash", "-c", "sleep 10000000"}},
 		Port:      NetperfServerCtlPort,
 	}
 	if z != "" && numNodes > 1 {
@@ -180,6 +187,19 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 		return fmt.Errorf("😥 Unable to create iperf service: %v", err)
 	}
 
+	// Create uperf service
+	uperfSVC := ServiceParams{
+		Name:      "uperf-service",
+		Namespace: "netperf",
+		Labels:    map[string]string{"role": serverRole},
+		CtlPort:   UperfServerCtlPort,
+		DataPort:  UperfServerDataPort,
+	}
+	s.UperfService, err = CreateService(uperfSVC, client)
+	if err != nil {
+		return fmt.Errorf("😥 Unable to create uperf service")
+	}
+
 	// Create netperf service
 	netperfSVC := ServiceParams{
 		Name:      "netperf-service",
@@ -198,7 +218,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 		Replicas:  1,
 		Image:     "quay.io/cloud-bulldozer/netperf:latest",
 		Labels:    map[string]string{"role": clientAcrossRole},
-		Command:   []string{"/bin/bash", "-c", "sleep 10000000"},
+		Commands:  [][]string{{"/bin/bash", "-c", "sleep 10000000"}},
 		Port:      NetperfServerCtlPort,
 	}
 	cdpAcross.PodAntiAffinity = apiv1.PodAntiAffinity{
@@ -212,7 +232,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 		HostNetwork: true,
 		Image:       "quay.io/cloud-bulldozer/netperf:latest",
 		Labels:      map[string]string{"role": hostNetClientRole},
-		Command:     []string{"/bin/bash", "-c", "sleep 10000000"},
+		Commands:    [][]string{{"/bin/bash", "-c", "sleep 10000000"}},
 		Port:        NetperfServerCtlPort,
 	}
 	if z != "" {
@@ -247,14 +267,20 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 			return err
 		}
 	}
+
+	// Use separate containers for servers
+	dpCommands := [][]string{{"/bin/bash", "-c", "netserver && sleep 10000000"},
+		{"/bin/bash", "-c", fmt.Sprintf("iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
+		{"/bin/bash", "-c", fmt.Sprintf("uperf -s -v -P %d && sleep 10000000", UperfServerCtlPort)}}
+
 	sdpHost := DeploymentParams{
 		Name:        "server-host",
 		Namespace:   "netperf",
 		Replicas:    1,
 		HostNetwork: true,
 		Image:       "quay.io/cloud-bulldozer/netperf:latest",
 		Labels:      map[string]string{"role": hostNetServerRole},
-		Command:     []string{"/bin/bash", "-c", fmt.Sprintf("netserver && iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
+		Commands:    dpCommands,
 		Port:        NetperfServerCtlPort,
 	}
 	// Start netperf server
@@ -264,7 +290,7 @@ func BuildSUT(client *kubernetes.Clientset, s *config.PerfScenarios) error {
 		Replicas:  1,
 		Image:     "quay.io/cloud-bulldozer/netperf:latest",
 		Labels:    map[string]string{"role": serverRole},
-		Command:   []string{"/bin/bash", "-c", fmt.Sprintf("netserver && iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
+		Commands:  dpCommands,
 		Port:      NetperfServerCtlPort,
 	}
 	if s.NodeLocal {
@@ -451,6 +477,21 @@ func CreateDeployment(dp DeploymentParams, client *kubernetes.Clientset) (*appsv
 	}
 	log.Infof("🚀 Starting Deployment for: %s in namespace: %s", dp.Name, dp.Namespace)
 	dc := client.AppsV1().Deployments(dp.Namespace)
+
+	// Add containers to deployment
+	var cmdContainers []apiv1.Container
+	for i := 0; i < len(dp.Commands); i++ {
+		// each container should have a unique name
+		containerName := fmt.Sprintf("%s-%d", dp.Name, i)
+		cmdContainers = append(cmdContainers,
+			apiv1.Container{
+				Name:            containerName,
+				Image:           dp.Image,
+				Command:         dp.Commands[i],
+				ImagePullPolicy: apiv1.PullAlways,
+			})
+	}
+
 	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: dp.Name,
@@ -470,14 +511,7 @@ func CreateDeployment(dp DeploymentParams, client *kubernetes.Clientset) (*appsv
 				Spec: apiv1.PodSpec{
 					ServiceAccountName: sa,
 					HostNetwork:        dp.HostNetwork,
-					Containers: []apiv1.Container{
-						{
-							Name:            dp.Name,
-							Image:           dp.Image,
-							Command:         dp.Command,
-							ImagePullPolicy: apiv1.PullAlways,
-						},
-					},
+					Containers:         cmdContainers,
 					Affinity: &apiv1.Affinity{
 						NodeAffinity: &dp.NodeAffinity,
 						PodAffinity:  &dp.PodAffinity,

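Since the server deployments now carry one command per container, a caller builds DeploymentParams with a Commands slice and lets CreateDeployment fan it out into uniquely named containers. The snippet below is a usage sketch only, assumed to live inside pkg/k8s; ExampleServerDeployment is a hypothetical function and not part of this commit, while the field names, constants, and container-naming behaviour come from the diff above.

```go
package k8s

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
)

// ExampleServerDeployment (hypothetical) shows how the new Commands field maps
// one command to one container when CreateDeployment builds the pod spec.
func ExampleServerDeployment(client *kubernetes.Clientset) error {
	sdp := DeploymentParams{
		Name:      "server",
		Namespace: "netperf",
		Replicas:  1,
		Image:     "quay.io/cloud-bulldozer/netperf:latest",
		Labels:    map[string]string{"role": serverRole},
		Commands: [][]string{
			{"/bin/bash", "-c", "netserver && sleep 10000000"},
			{"/bin/bash", "-c", fmt.Sprintf("iperf3 -s -p %d && sleep 10000000", IperfServerCtlPort)},
			{"/bin/bash", "-c", fmt.Sprintf("uperf -s -v -P %d && sleep 10000000", UperfServerCtlPort)},
		},
		Port: NetperfServerCtlPort,
	}
	// CreateDeployment iterates over Commands and creates one container per
	// entry, named "server-0", "server-1", "server-2" (see the loop added above).
	_, err := CreateDeployment(sdp, client)
	return err
}
```
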
pkg/results/result.go

Lines changed: 2 additions & 2 deletions
@@ -281,11 +281,11 @@ func ShowRRResult(s ScenarioResults) {
 func ShowLatencyResult(s ScenarioResults) {
 	if checkResults(s, "RR") {
 		logging.Debug("Rendering RR P99 Latency results")
-		table := initTable([]string{"Result Type", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg 99%tile value"})
+		table := initTable([]string{"Result Type", "Driver", "Scenario", "Parallelism", "Host Network", "Service", "Message Size", "Same node", "Duration", "Samples", "Avg 99%tile value"})
 		for _, r := range s.Results {
 			if strings.Contains(r.Profile, "RR") {
 				p99, _ := Average(r.LatencySummary)
-				table.Append([]string{"RR Latency Results", r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", p99, "usec")})
+				table.Append([]string{"RR Latency Results", r.Driver, r.Profile, strconv.Itoa(r.Parallelism), strconv.FormatBool(r.HostNetwork), strconv.FormatBool(r.Service), strconv.Itoa(r.MessageSize), strconv.FormatBool(r.SameNode), strconv.Itoa(r.Duration), strconv.Itoa(r.Samples), fmt.Sprintf("%f (%s)", p99, "usec")})
 			}
 		}
 		table.Render()
