diff --git a/cmd/kperf/commands/virtualcluster/nodepool.go b/cmd/kperf/commands/virtualcluster/nodepool.go
index 2c64a90..13beefe 100644
--- a/cmd/kperf/commands/virtualcluster/nodepool.go
+++ b/cmd/kperf/commands/virtualcluster/nodepool.go
@@ -1,7 +1,11 @@
 package virtualcluster
 
 import (
+	"context"
 	"fmt"
+	"strings"
+
+	"github.com/Azure/kperf/virtualcluster"
 
 	"github.com/urfave/cli"
 )
@@ -44,7 +48,23 @@ var nodepoolAddCommand = cli.Command{
 		},
 	},
 	Action: func(cliCtx *cli.Context) error {
-		return fmt.Errorf("nodepool add - not implemented")
+		if cliCtx.NArg() != 1 {
+			return fmt.Errorf("required exactly one argument as nodepool name")
+		}
+		nodepoolName := strings.TrimSpace(cliCtx.Args().Get(0))
+		if len(nodepoolName) == 0 {
+			return fmt.Errorf("required non-empty nodepool name")
+		}
+
+		kubeCfgPath := cliCtx.String("kubeconfig")
+
+		return virtualcluster.CreateNodepool(context.Background(),
+			kubeCfgPath,
+			nodepoolName,
+			virtualcluster.WithNodepoolCPUOpt(cliCtx.Int("cpu")),
+			virtualcluster.WithNodepoolMemoryOpt(cliCtx.Int("memory")),
+			virtualcluster.WithNodepoolCountOpt(cliCtx.Int("nodes")),
+		)
 	},
 }
 
diff --git a/virtualcluster/nodes_common.go b/virtualcluster/nodes_common.go
new file mode 100644
index 0000000..63d8a2c
--- /dev/null
+++ b/virtualcluster/nodes_common.go
@@ -0,0 +1,109 @@
+package virtualcluster
+
+import (
+	"fmt"
+
+	"github.com/Azure/kperf/helmcli"
+)
+
+var (
+	defaultNodepoolCfg = nodepoolConfig{
+		count:  10,
+		cpu:    8,
+		memory: 16, // GiB
+	}
+
+	// virtualnodeReleaseLabels is used to mark that a helm chart release
+	// is managed by kperf.
+	virtualnodeReleaseLabels = map[string]string{
+		"virtualnodes.kperf.io/managed": "true",
+	}
+)
+
+const (
+	// virtualnodeChartName should be aligned with ../manifests/virtualcluster/nodes.
+	virtualnodeChartName = "virtualcluster/nodes"
+
+	// virtualnodeReleaseNamespace is used to host virtual nodes.
+	//
+	// NOTE: The Node resource is cluster-scoped. In case a new node name
+	// conflicts with an existing one, we use a fixed namespace to store
+	// all the resources related to virtual nodes.
+	virtualnodeReleaseNamespace = "virtualnodes-kperf-io"
+)
+
+type nodepoolConfig struct {
+	// count represents the desired number of nodes.
+	count int
+	// cpu represents the logical CPU resources provided by each virtual node.
+	cpu int
+	// memory represents the logical memory resources provided by each virtual
+	// node. The unit is GiB.
+	memory int
+	// labels are applied to each virtual node.
+	labels []string
+	// nodeSelectors forces the virtual nodes' controller onto nodes with these specific labels.
+	nodeSelectors map[string][]string
+}
+
+func (cfg *nodepoolConfig) validate() error {
+	if cfg.count <= 0 || cfg.cpu <= 0 || cfg.memory <= 0 {
+		return fmt.Errorf("invalid count=%d or cpu=%d or memory=%d",
+			cfg.count, cfg.cpu, cfg.memory)
+	}
+	return nil
+}
+
+// NodepoolOpt is used to update the default node pool settings.
+type NodepoolOpt func(*nodepoolConfig)
+
+// WithNodepoolCountOpt updates the node count.
+func WithNodepoolCountOpt(count int) NodepoolOpt {
+	return func(cfg *nodepoolConfig) {
+		cfg.count = count
+	}
+}
+
+// WithNodepoolCPUOpt updates the CPU resources.
+func WithNodepoolCPUOpt(cpu int) NodepoolOpt {
+	return func(cfg *nodepoolConfig) {
+		cfg.cpu = cpu
+	}
+}
+
+// WithNodepoolMemoryOpt updates the memory resources.
+func WithNodepoolMemoryOpt(memory int) NodepoolOpt {
+	return func(cfg *nodepoolConfig) {
+		cfg.memory = memory
+	}
+}
+
+// WithNodepoolLabelsOpt updates the nodes' labels.
+func WithNodepoolLabelsOpt(labels []string) NodepoolOpt {
+	return func(cfg *nodepoolConfig) {
+		cfg.labels = labels
+	}
+}
+
+// WithNodepoolNodeControllerAffinity forces the virtual nodes' controller
+// onto nodes with these specific labels.
+func WithNodepoolNodeControllerAffinity(nodeSelectors map[string][]string) NodepoolOpt {
+	return func(cfg *nodepoolConfig) {
+		cfg.nodeSelectors = nodeSelectors
+	}
+}
+
+// toHelmValuesAppliers creates ValuesAppliers.
+//
+// NOTE: Please align with ../manifests/virtualcluster/nodes/values.yaml
+//
+// TODO: Add YAML ValuesAppliers to support array types.
+func (cfg *nodepoolConfig) toHelmValuesAppliers(nodepoolName string) []helmcli.ValuesApplier {
+	res := make([]string, 0, 4)
+
+	res = append(res, fmt.Sprintf("name=%s", nodepoolName))
+	res = append(res, fmt.Sprintf("replicas=%d", cfg.count))
+	res = append(res, fmt.Sprintf("cpu=%d", cfg.cpu))
+	res = append(res, fmt.Sprintf("memory=%d", cfg.memory))
+	return []helmcli.ValuesApplier{helmcli.StringPathValuesApplier(res...)}
+}
diff --git a/virtualcluster/nodes_create.go b/virtualcluster/nodes_create.go
new file mode 100644
index 0000000..57e0e1b
--- /dev/null
+++ b/virtualcluster/nodes_create.go
@@ -0,0 +1,54 @@
+package virtualcluster
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/Azure/kperf/helmcli"
+	"github.com/Azure/kperf/manifests"
+)
+
+// CreateNodepool creates a new node pool.
+//
+// TODO:
+// 1. create a new package to define ErrNotFound, ErrAlreadyExists, ... errors.
+// 2. support configurable timeout.
+func CreateNodepool(ctx context.Context, kubeconfigPath string, nodepoolName string, opts ...NodepoolOpt) error {
+	cfg := defaultNodepoolCfg
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	if err := cfg.validate(); err != nil {
+		return err
+	}
+
+	getCli, err := helmcli.NewGetCli(kubeconfigPath, virtualnodeReleaseNamespace)
+	if err != nil {
+		return fmt.Errorf("failed to create helm get client: %w", err)
+	}
+
+	_, err = getCli.Get(nodepoolName)
+	if err == nil {
+		return fmt.Errorf("nodepool %s already exists", nodepoolName)
+	}
+
+	ch, err := manifests.LoadChart(virtualnodeChartName)
+	if err != nil {
+		return fmt.Errorf("failed to load virtual node chart: %w", err)
+	}
+
+	releaseCli, err := helmcli.NewReleaseCli(
+		kubeconfigPath,
+		virtualnodeReleaseNamespace,
+		nodepoolName,
+		ch,
+		virtualnodeReleaseLabels,
+		cfg.toHelmValuesAppliers(nodepoolName)...,
+	)
+	if err != nil {
+		return fmt.Errorf("failed to create helm release client: %w", err)
+	}
+	return releaseCli.Deploy(ctx, 120*time.Second)
+}
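
For context, a minimal sketch (not part of the patch) of how the CreateNodepool API added above might be driven directly from Go code instead of through the kperf CLI. The kubeconfig path, pool name, and sizing values below are illustrative assumptions, not defaults from the patch.

package main

import (
	"context"
	"log"

	"github.com/Azure/kperf/virtualcluster"
)

func main() {
	// Placeholder values: the kubeconfig path and nodepool sizing are
	// illustrative assumptions only.
	err := virtualcluster.CreateNodepool(context.Background(),
		"/path/to/kubeconfig",
		"example-pool",
		virtualcluster.WithNodepoolCountOpt(5),
		virtualcluster.WithNodepoolCPUOpt(4),
		virtualcluster.WithNodepoolMemoryOpt(8), // GiB
	)
	if err != nil {
		log.Fatalf("failed to create nodepool: %v", err)
	}
}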