*: init kperf-vc-nodepool add subcommand
Users can use the following commands to create a new virtual node
pool.

```bash
$ bin/kperf vc nodepool add vc-kperf-1
$ bin/kperf vc nodepool add vc-kperf-2 --nodes 1 --cpu 128 --memory 180
$ kubectl get nodes | grep vc-kperf-
vc-kperf-1-0                          Ready    agent   4m40s   fake
vc-kperf-1-1                          Ready    agent   4m40s   fake
vc-kperf-1-2                          Ready    agent   4m40s   fake
vc-kperf-1-3                          Ready    agent   4m40s   fake
vc-kperf-1-4                          Ready    agent   4m40s   fake
vc-kperf-1-5                          Ready    agent   4m40s   fake
vc-kperf-1-6                          Ready    agent   4m40s   fake
vc-kperf-1-7                          Ready    agent   4m40s   fake
vc-kperf-1-8                          Ready    agent   4m40s   fake
vc-kperf-1-9                          Ready    agent   4m40s   fake
vc-kperf-2-0                          Ready    agent   3m20s   fake
```

Each nodepool is managed as a Helm chart release, so users can also
use the `helm` command to check it.

```bash
$ helm -n virtualnodes-kperf-io get values vc-kperf-1
USER-SUPPLIED VALUES:
controllerNodeSelectors: {}
cpu: 8
memory: 16
name: vc-kperf-1
nodeLabels: {}
replicas: 10

$ helm -n virtualnodes-kperf-io get values vc-kperf-2
USER-SUPPLIED VALUES:
controllerNodeSelectors: {}
cpu: 128
memory: 180
name: vc-kperf-2
nodeLabels: {}
replicas: 1
```

Since we don't have `kperf-vc-nodepool del` yet, we should use `helm`
to clean up nodepools.

```bash
$ helm -n virtualnodes-kperf-io uninstall vc-kperf-1
$ helm -n virtualnodes-kperf-io uninstall vc-kperf-2
```

Signed-off-by: Wei Fu <weifu@microsoft.com>
fuweid committed Jan 3, 2024
1 parent 3fa3436 commit ef4908e
Showing 3 changed files with 184 additions and 1 deletion.
22 changes: 21 additions & 1 deletion cmd/kperf/commands/virtualcluster/nodepool.go
@@ -1,7 +1,11 @@
package virtualcluster

import (
"context"
"fmt"
"strings"

"github.com/Azure/kperf/virtualcluster"

"github.com/urfave/cli"
)
@@ -44,7 +48,23 @@ var nodepoolAddCommand = cli.Command{
		},
	},
	Action: func(cliCtx *cli.Context) error {
		return fmt.Errorf("nodepool add - not implemented")
		if cliCtx.NArg() != 1 {
			return fmt.Errorf("required only one argument as nodepool name")
		}
		nodepoolName := strings.TrimSpace(cliCtx.Args().Get(0))
		if len(nodepoolName) == 0 {
			return fmt.Errorf("required non-empty nodepool name")
		}

		kubeCfgPath := cliCtx.String("kubeconfig")

		return virtualcluster.CreateNodepool(context.Background(),
			kubeCfgPath,
			nodepoolName,
			virtualcluster.WithNodepoolCPUOpt(cliCtx.Int("cpu")),
			virtualcluster.WithNodepoolMemoryOpt(cliCtx.Int("memory")),
			virtualcluster.WithNodepoolCountOpt(cliCtx.Int("nodes")),
		)
	},
}

109 changes: 109 additions & 0 deletions virtualcluster/nodes_common.go
@@ -0,0 +1,109 @@
package virtualcluster

import (
"fmt"

"github.com/Azure/kperf/helmcli"
)

var (
	defaultNodepoolCfg = nodepoolConfig{
		count:  10,
		cpu:    8,
		memory: 16, // GiB
	}

	// virtualnodeReleaseLabels is used to mark that the helm chart release
	// is managed by kperf.
	virtualnodeReleaseLabels = map[string]string{
		"virtualnodes.kperf.io/managed": "true",
	}
)

const (
	// virtualnodeChartName should be aligned with ../manifests/virtualcluster/nodes.
	virtualnodeChartName = "virtualcluster/nodes"

	// virtualnodeReleaseNamespace is used to host virtual nodes.
	//
	// NOTE: The Node resource is cluster-scoped. In case a new node's name
	// conflicts with an existing one, we use a fixed namespace to store all
	// the resources related to virtual nodes.
	virtualnodeReleaseNamespace = "virtualnodes-kperf-io"
)

type nodepoolConfig struct {
	// count represents the desired number of nodes.
	count int
	// cpu represents the logical CPU resource provided by each virtual node.
	cpu int
	// memory represents the logical memory resource provided by each virtual node.
	// The unit is GiB.
	memory int
	// labels is to be applied to each virtual node.
	labels []string
	// nodeSelectors forces the virtual node's controller to run on nodes with
	// the specified labels.
	nodeSelectors map[string][]string
}

func (cfg *nodepoolConfig) validate() error {
	if cfg.count <= 0 || cfg.cpu <= 0 || cfg.memory <= 0 {
		return fmt.Errorf("invalid count=%d or cpu=%d or memory=%d",
			cfg.count, cfg.cpu, cfg.memory)
	}
	return nil
}

// NodepoolOpt is used to update the default node pool settings.
type NodepoolOpt func(*nodepoolConfig)

// WithNodepoolCountOpt updates node count.
func WithNodepoolCountOpt(count int) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.count = count
	}
}

// WithNodepoolCPUOpt updates CPU resource.
func WithNodepoolCPUOpt(cpu int) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.cpu = cpu
	}
}

// WithNodepoolMemoryOpt updates Memory resource.
func WithNodepoolMemoryOpt(memory int) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.memory = memory
	}
}

// WithNodepoolLabelsOpt updates node's labels.
func WithNodepoolLabelsOpt(labels []string) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.labels = labels
	}
}

// WithNodepoolNodeControllerAffinity forces the virtual node's controller to
// run on nodes with the specified labels.
func WithNodepoolNodeControllerAffinity(nodeSelectors map[string][]string) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.nodeSelectors = nodeSelectors
	}
}

// toHelmValuesAppliers creates ValuesAppliers.
//
// NOTE: Please align with ../manifests/virtualcluster/nodes/values.yaml
//
// TODO: Add YAML ValuesAppliers to support array type.
func (cfg *nodepoolConfig) toHelmValuesAppliers(nodepoolName string) []helmcli.ValuesApplier {
	res := make([]string, 0, 4)

	res = append(res, fmt.Sprintf("name=%s", nodepoolName))
	res = append(res, fmt.Sprintf("replicas=%d", cfg.count))
	res = append(res, fmt.Sprintf("cpu=%d", cfg.cpu))
	res = append(res, fmt.Sprintf("memory=%d", cfg.memory))
	return []helmcli.ValuesApplier{helmcli.StringPathValuesApplier(res...)}
}
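To make the option pattern above concrete, here is a minimal, hypothetical in-package test (not part of this commit; the test name and file are illustrative only). It applies the same options that the `--nodes 1 --cpu 128 --memory 180` example uses and checks the resulting config.

```go
package virtualcluster

import "testing"

// Hypothetical sketch: each NodepoolOpt mutates a copy of defaultNodepoolCfg,
// so the caller composes options and then validates the final config.
func TestNodepoolOptsSketch(t *testing.T) {
	cfg := defaultNodepoolCfg
	for _, opt := range []NodepoolOpt{
		WithNodepoolCountOpt(1),
		WithNodepoolCPUOpt(128),
		WithNodepoolMemoryOpt(180),
	} {
		opt(&cfg)
	}
	if err := cfg.validate(); err != nil {
		t.Fatalf("expected a valid config, got: %v", err)
	}
	if cfg.count != 1 || cfg.cpu != 128 || cfg.memory != 180 {
		t.Fatalf("unexpected config: %+v", cfg)
	}
}
```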
54 changes: 54 additions & 0 deletions virtualcluster/nodes_create.go
@@ -0,0 +1,54 @@
package virtualcluster

import (
"context"
"fmt"
"time"

"github.com/Azure/kperf/helmcli"
"github.com/Azure/kperf/manifests"
)

// CreateNodepool creates a new node pool.
//
// TODO:
// 1. create a new package to define ErrNotFound, ErrAlreadyExists, ... errors.
// 2. support configurable timeout.
func CreateNodepool(ctx context.Context, kubeconfigPath string, nodepoolName string, opts ...NodepoolOpt) error {
	cfg := defaultNodepoolCfg
	for _, opt := range opts {
		opt(&cfg)
	}

	if err := cfg.validate(); err != nil {
		return err
	}

	getCli, err := helmcli.NewGetCli(kubeconfigPath, virtualnodeReleaseNamespace)
	if err != nil {
		return fmt.Errorf("failed to create helm get client: %w", err)
	}

	_, err = getCli.Get(nodepoolName)
	if err == nil {
		return fmt.Errorf("nodepool %s already exists", nodepoolName)
	}

	ch, err := manifests.LoadChart(virtualnodeChartName)
	if err != nil {
		return fmt.Errorf("failed to load virtual node chart: %w", err)
	}

	releaseCli, err := helmcli.NewReleaseCli(
		kubeconfigPath,
		virtualnodeReleaseNamespace,
		nodepoolName,
		ch,
		virtualnodeReleaseLabels,
		cfg.toHelmValuesAppliers(nodepoolName)...,
	)
	if err != nil {
		return fmt.Errorf("failed to create helm release client: %w", err)
	}
	return releaseCli.Deploy(ctx, 120*time.Second)
}
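For completeness, a minimal sketch of driving `CreateNodepool` directly from Go instead of through the CLI; the kubeconfig path and program layout are placeholders, and the options mirror the `vc-kperf-2` example from the commit message.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/kperf/virtualcluster"
)

func main() {
	// Placeholder path; the CLI reads this from its kubeconfig flag.
	kubeCfgPath := "/path/to/kubeconfig"

	// Equivalent to: bin/kperf vc nodepool add vc-kperf-2 --nodes 1 --cpu 128 --memory 180
	err := virtualcluster.CreateNodepool(context.Background(),
		kubeCfgPath,
		"vc-kperf-2",
		virtualcluster.WithNodepoolCountOpt(1),
		virtualcluster.WithNodepoolCPUOpt(128),
		virtualcluster.WithNodepoolMemoryOpt(180),
	)
	if err != nil {
		log.Fatalf("failed to create nodepool: %v", err)
	}
}
```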
