*: init kperf-vc-nodepool add subcommand
Users can use the following commands to create new virtual node pools:

```bash
$ bin/kperf vc nodepool add vc-kperf-1
$ bin/kperf vc nodepool add vc-kperf-2 --nodes 1 --cpu 128 --memory 180
$ kubectl get nodes | grep vc-kperf-
vc-kperf-1-0   Ready   agent   4m40s   fake
vc-kperf-1-1   Ready   agent   4m40s   fake
vc-kperf-1-2   Ready   agent   4m40s   fake
vc-kperf-1-3   Ready   agent   4m40s   fake
vc-kperf-1-4   Ready   agent   4m40s   fake
vc-kperf-1-5   Ready   agent   4m40s   fake
vc-kperf-1-6   Ready   agent   4m40s   fake
vc-kperf-1-7   Ready   agent   4m40s   fake
vc-kperf-1-8   Ready   agent   4m40s   fake
vc-kperf-1-9   Ready   agent   4m40s   fake
vc-kperf-2-0   Ready   agent   3m20s   fake
```

Each node pool is managed as a Helm chart release, so users can also inspect it with the `helm` command:

```bash
$ helm -n virtualnodes-kperf-io get values vc-kperf-1
USER-SUPPLIED VALUES:
controllerNodeSelectors: {}
cpu: 8
memory: 16
name: vc-kperf-1
nodeLabels: {}
replicas: 10

$ helm -n virtualnodes-kperf-io get values vc-kperf-2
USER-SUPPLIED VALUES:
controllerNodeSelectors: {}
cpu: 128
memory: 180
name: vc-kperf-2
nodeLabels: {}
replicas: 1
```

Since there is no `kperf-vc-nodepool del` yet, use `helm` to clean up node pools:

```bash
$ helm -n virtualnodes-kperf-io uninstall vc-kperf-1
$ helm -n virtualnodes-kperf-io uninstall vc-kperf-2
```

Signed-off-by: Wei Fu <weifu@microsoft.com>
Showing 3 changed files with 184 additions and 1 deletion.
@@ -0,0 +1,109 @@
package virtualcluster

import (
	"fmt"

	"github.com/Azure/kperf/helmcli"
)

var (
	defaultNodepoolCfg = nodepoolConfig{
		count:  10,
		cpu:    8,
		memory: 16, // GiB
	}

	// virtualnodeReleaseLabels is used to mark that the helm chart release
	// is managed by kperf.
	virtualnodeReleaseLabels = map[string]string{
		"virtualnodes.kperf.io/managed": "true",
	}
)

const (
	// virtualnodeChartName should be aligned with ../manifests/virtualcluster/nodes.
	virtualnodeChartName = "virtualcluster/nodes"

	// virtualnodeReleaseNamespace is used to host virtual nodes.
	//
	// NOTE: The Node resource is cluster-scoped. In case a new node name
	// conflicts with an existing one, we should use a fixed namespace to
	// store all the resources related to virtual nodes.
	virtualnodeReleaseNamespace = "virtualnodes-kperf-io"
)

type nodepoolConfig struct {
	// count represents the desired number of nodes.
	count int
	// cpu represents the logical CPU resource provided by each virtual node.
	cpu int
	// memory represents the logical memory resource provided by each virtual node.
	// The unit is GiB.
	memory int
	// labels is applied to each virtual node.
	labels []string
	// nodeSelectors forces the virtual node's controller onto nodes with those specific labels.
	nodeSelectors map[string][]string
}

func (cfg *nodepoolConfig) validate() error {
	if cfg.count <= 0 || cfg.cpu <= 0 || cfg.memory <= 0 {
		return fmt.Errorf("invalid count=%d or cpu=%d or memory=%d",
			cfg.count, cfg.cpu, cfg.memory)
	}
	return nil
}

// NodepoolOpt is used to update the default node pool settings.
type NodepoolOpt func(*nodepoolConfig)

// WithNodepoolCountOpt updates the node count.
func WithNodepoolCountOpt(count int) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.count = count
	}
}

// WithNodepoolCPUOpt updates the CPU resource.
func WithNodepoolCPUOpt(cpu int) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.cpu = cpu
	}
}

// WithNodepoolMemoryOpt updates the memory resource.
func WithNodepoolMemoryOpt(memory int) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.memory = memory
	}
}

// WithNodepoolLabelsOpt updates the nodes' labels.
func WithNodepoolLabelsOpt(labels []string) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.labels = labels
	}
}

// WithNodepoolNodeControllerAffinity forces the virtual node's controller
// onto nodes with those specific labels.
func WithNodepoolNodeControllerAffinity(nodeSelectors map[string][]string) NodepoolOpt {
	return func(cfg *nodepoolConfig) {
		cfg.nodeSelectors = nodeSelectors
	}
}

// toHelmValuesAppliers creates ValuesAppliers.
//
// NOTE: Please align with ../manifests/virtualcluster/nodes/values.yaml
//
// TODO: Add YAML ValuesAppliers to support array types.
func (cfg *nodepoolConfig) toHelmValuesAppliers(nodepoolName string) []helmcli.ValuesApplier {
	res := make([]string, 0, 4)

	res = append(res, fmt.Sprintf("name=%s", nodepoolName))
	res = append(res, fmt.Sprintf("replicas=%d", cfg.count))
	res = append(res, fmt.Sprintf("cpu=%d", cfg.cpu))
	res = append(res, fmt.Sprintf("memory=%d", cfg.memory))
	return []helmcli.ValuesApplier{helmcli.StringPathValuesApplier(res...)}
}
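
For illustration only, here is a minimal in-package sketch (not part of this commit; the test name is hypothetical) showing how the exported functional options override `defaultNodepoolCfg`. The resulting values mirror the `vc-kperf-2` release shown in the commit message above.

```go
package virtualcluster

import "testing"

// TestNodepoolOptsSketch is a hypothetical example: it applies the exported
// options to the package defaults, mirroring
// `kperf vc nodepool add vc-kperf-2 --nodes 1 --cpu 128 --memory 180`.
func TestNodepoolOptsSketch(t *testing.T) {
	cfg := defaultNodepoolCfg // defaults: count=10, cpu=8, memory=16 (GiB)
	for _, opt := range []NodepoolOpt{
		WithNodepoolCountOpt(1),
		WithNodepoolCPUOpt(128),
		WithNodepoolMemoryOpt(180),
	} {
		opt(&cfg)
	}
	if err := cfg.validate(); err != nil {
		t.Fatalf("expected valid config, got: %v", err)
	}
	if cfg.count != 1 || cfg.cpu != 128 || cfg.memory != 180 {
		t.Fatalf("unexpected config: %+v", cfg)
	}
}
```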
@@ -0,0 +1,54 @@
package virtualcluster

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/kperf/helmcli"
	"github.com/Azure/kperf/manifests"
)

// CreateNodepool creates a new node pool.
//
// TODO:
// 1. create a new package to define ErrNotFound, ErrAlreadyExists, ... errors.
// 2. support configurable timeout.
func CreateNodepool(ctx context.Context, kubeconfigPath string, nodepoolName string, opts ...NodepoolOpt) error {
	cfg := defaultNodepoolCfg
	for _, opt := range opts {
		opt(&cfg)
	}

	if err := cfg.validate(); err != nil {
		return err
	}

	getCli, err := helmcli.NewGetCli(kubeconfigPath, virtualnodeReleaseNamespace)
	if err != nil {
		return fmt.Errorf("failed to create helm get client: %w", err)
	}

	_, err = getCli.Get(nodepoolName)
	if err == nil {
		return fmt.Errorf("nodepool %s already exists", nodepoolName)
	}

	ch, err := manifests.LoadChart(virtualnodeChartName)
	if err != nil {
		return fmt.Errorf("failed to load virtual node chart: %w", err)
	}

	releaseCli, err := helmcli.NewReleaseCli(
		kubeconfigPath,
		virtualnodeReleaseNamespace,
		nodepoolName,
		ch,
		virtualnodeReleaseLabels,
		cfg.toHelmValuesAppliers(nodepoolName)...,
	)
	if err != nil {
		return fmt.Errorf("failed to create helm release client: %w", err)
	}
	return releaseCli.Deploy(ctx, 120*time.Second)
}
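
As a usage sketch, the exported API above can also be driven directly from Go, mirroring the `vc-kperf-2` CLI example in the commit message. The import path and the kubeconfig path below are assumptions, not part of this commit.

```go
package main

import (
	"context"
	"log"

	// Assumption: the import path follows the helmcli/manifests layout;
	// adjust it to wherever the virtualcluster package actually lives.
	"github.com/Azure/kperf/virtualcluster"
)

func main() {
	// Equivalent of:
	//   bin/kperf vc nodepool add vc-kperf-2 --nodes 1 --cpu 128 --memory 180
	err := virtualcluster.CreateNodepool(
		context.Background(),
		"/home/user/.kube/config", // hypothetical kubeconfig path
		"vc-kperf-2",
		virtualcluster.WithNodepoolCountOpt(1),
		virtualcluster.WithNodepoolCPUOpt(128),
		virtualcluster.WithNodepoolMemoryOpt(180),
	)
	if err != nil {
		log.Fatalf("failed to create nodepool vc-kperf-2: %v", err)
	}
}
```

Once the deployment completes, the release appears under the `virtualnodes-kperf-io` namespace and the fake nodes under `kubectl get nodes`, as shown in the commit message.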