Merge pull request #482 from alexeldeib/ace/caks
feat: aks provider
k8s-ci-robot authored May 14, 2020
2 parents 8d4a40f + e09bf5a commit 4b04993
Showing 50 changed files with 3,088 additions and 117 deletions.
3 changes: 2 additions & 1 deletion .dockerignore
@@ -6,7 +6,8 @@
!/cloud/**
!/controllers/**
!/exp/**
!/feature/**
!/pkg/**
!/main.go
!/go.mod
!/go.sum
!/go.sum
21 changes: 18 additions & 3 deletions Makefile
@@ -73,6 +73,7 @@ RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac
PULL_POLICY ?= Always

CLUSTER_TEMPLATE ?= cluster-template.yaml
MANAGED_CLUSTER_TEMPLATE ?= cluster-template-aks.yaml

## --------------------------------------
## Help
@@ -319,7 +320,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST)
$(MAKE) kind-create

# Install cert manager and wait for availability
kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml
kubectl wait --for=condition=Available --timeout=5m apiservice v1beta1.webhook.cert-manager.io

# Deploy CAPI
@@ -347,16 +348,30 @@ create-workload-cluster: $(ENVSUBST)
$(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | kubectl apply -f -

# Wait for the kubeconfig to become available.
timeout 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
# Get kubeconfig and store it locally.
kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig
timeout 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"
timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"

# Deploy calico
kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico.yaml

@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'

.PHONY: create-aks-cluster
create-aks-cluster: $(KUSTOMIZE) $(ENVSUBST)
# Create managed Cluster.
$(ENVSUBST) < $(TEMPLATES_DIR)/$(MANAGED_CLUSTER_TEMPLATE) | kubectl apply -f -

# Wait for the kubeconfig to become available.
timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
# Get kubeconfig and store it locally.
kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig
timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"

@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'


.PHONY: create-cluster
create-cluster: create-management-cluster create-workload-cluster ## Create a workload development Kubernetes cluster on Azure in a kind management cluster.

7 changes: 7 additions & 0 deletions cloud/interfaces.go
@@ -35,3 +35,10 @@ type GetterService interface {
Reconcile(ctx context.Context, spec interface{}) error
Delete(ctx context.Context, spec interface{}) error
}

// CredentialGetter is a GetterService which knows how to retrieve credentials for an Azure
// resource in a resource group.
type CredentialGetter interface {
GetterService
GetCredentials(ctx context.Context, group string, cluster string) ([]byte, error)
}
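
To illustrate the intended call pattern for the new interface, here is a minimal consumer sketch (not part of this diff); the helper name fetchKubeconfig and its parameters are hypothetical, and only the interface methods shown above are assumed:

package example

import (
	"context"

	azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
)

// fetchKubeconfig reconciles the given spec and then retrieves credentials
// (for AKS, the cluster kubeconfig) for the named cluster in a resource group.
func fetchKubeconfig(ctx context.Context, svc azure.CredentialGetter, spec interface{}, group, cluster string) ([]byte, error) {
	if err := svc.Reconcile(ctx, spec); err != nil {
		return nil, err
	}
	return svc.GetCredentials(ctx, group, cluster)
}
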
6 changes: 6 additions & 0 deletions cloud/scope/machinepool.go
@@ -53,6 +53,7 @@ type (
// MachinePoolScope defines a scope defined around a machine pool and its cluster.
MachinePoolScope struct {
logr.Logger
AzureClients
client client.Client
patchHelper *patch.Helper
Cluster *capiv1.Cluster
@@ -85,6 +86,10 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro
params.Logger = klogr.New()
}

if err := params.AzureClients.setCredentials(params.AzureCluster.Spec.SubscriptionID); err != nil {
return nil, errors.Wrap(err, "failed to create Azure session")
}

helper, err := patch.NewHelper(params.AzureMachinePool, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
@@ -95,6 +100,7 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro
MachinePool: params.MachinePool,
AzureCluster: params.AzureCluster,
AzureMachinePool: params.AzureMachinePool,
AzureClients: params.AzureClients,
Logger: params.Logger,
patchHelper: helper,
}, nil
100 changes: 100 additions & 0 deletions cloud/scope/managedcontrolplane.go
@@ -0,0 +1,100 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scope

import (
"context"

"github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/klogr"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"

"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// ManagedControlPlaneScopeParams defines the input parameters used to create a new ManagedControlPlaneScope.
type ManagedControlPlaneScopeParams struct {
AzureClients
Client client.Client
Logger logr.Logger
Cluster *clusterv1.Cluster
ControlPlane *infrav1exp.AzureManagedControlPlane
InfraMachinePool *infrav1exp.AzureManagedMachinePool
MachinePool *expv1.MachinePool
PatchTarget runtime.Object
}

// NewManagedControlPlaneScope creates a new Scope from the supplied parameters.
// This is meant to be called for each reconcile iteration.
func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*ManagedControlPlaneScope, error) {
if params.Cluster == nil {
return nil, errors.New("failed to generate new scope from nil Cluster")
}

if params.ControlPlane == nil {
return nil, errors.New("failed to generate new scope from nil ControlPlane")
}

if params.Logger == nil {
params.Logger = klogr.New()
}

if err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID); err != nil {
return nil, errors.Wrap(err, "failed to create Azure session")
}

helper, err := patch.NewHelper(params.PatchTarget, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}

return &ManagedControlPlaneScope{
Logger: params.Logger,
Client: params.Client,
AzureClients: params.AzureClients,
Cluster: params.Cluster,
ControlPlane: params.ControlPlane,
MachinePool: params.MachinePool,
InfraMachinePool: params.InfraMachinePool,
PatchTarget: params.PatchTarget,
patchHelper: helper,
}, nil
}

// ManagedControlPlaneScope defines the basic context for an actuator to operate upon.
type ManagedControlPlaneScope struct {
logr.Logger
Client client.Client
patchHelper *patch.Helper

AzureClients
Cluster *clusterv1.Cluster
MachinePool *expv1.MachinePool
ControlPlane *infrav1exp.AzureManagedControlPlane
InfraMachinePool *infrav1exp.AzureManagedMachinePool
PatchTarget runtime.Object
}

// PatchObject persists the cluster configuration and status.
func (s *ManagedControlPlaneScope) PatchObject(ctx context.Context) error {
return s.patchHelper.Patch(ctx, s.PatchTarget)
}
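
As a rough illustration of how a controller might consume this scope (a sketch under assumptions, not code from this PR): the scope is rebuilt on every reconcile iteration and PatchObject is deferred so changes to the patch target are persisted on exit. The reconcileControlPlane helper and its parameters are hypothetical.

package example

import (
	"context"

	"github.com/go-logr/logr"
	"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileControlPlane builds a fresh scope for this iteration and defers
// PatchObject so any mutations to the control plane object are persisted.
// Azure credentials are resolved inside the scope constructor via AzureClients.
func reconcileControlPlane(ctx context.Context, c client.Client, log logr.Logger,
	cluster *clusterv1.Cluster, cp *infrav1exp.AzureManagedControlPlane) (reterr error) {
	s, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{
		Client:       c,
		Logger:       log,
		Cluster:      cluster,
		ControlPlane: cp,
		PatchTarget:  cp, // the object PatchObject will persist
	})
	if err != nil {
		return err
	}
	defer func() {
		if err := s.PatchObject(ctx); err != nil && reterr == nil {
			reterr = err
		}
	}()

	// ... reconcile AKS resources using s ...
	return nil
}
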
133 changes: 133 additions & 0 deletions cloud/services/agentpools/agentpools.go
@@ -0,0 +1,133 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package agentpools

import (
"context"
"fmt"

"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
"github.com/google/go-cmp/cmp"
"github.com/pkg/errors"
"k8s.io/klog"
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
)

// Spec contains properties to create an agent pool.
type Spec struct {
Name string
ResourceGroup string
Cluster string
Version *string
SKU string
Replicas int32
OSDiskSizeGB int32
}

// Get fetches an agent pool from Azure.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
agentPoolSpec, ok := spec.(*Spec)
if !ok {
return containerservice.AgentPool{}, errors.New("expected agent pool specification")
}
return s.Client.Get(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
}

// Reconcile idempotently creates or updates an agent pool, if possible.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
agentPoolSpec, ok := spec.(*Spec)
if !ok {
return errors.New("expected agent pool specification")
}

profile := containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
VMSize: containerservice.VMSizeTypes(agentPoolSpec.SKU),
OsDiskSizeGB: &agentPoolSpec.OSDiskSizeGB,
Count: &agentPoolSpec.Replicas,
Type: containerservice.VirtualMachineScaleSets,
OrchestratorVersion: agentPoolSpec.Version,
},
}

existingSpec, err := s.Get(ctx, spec)
if err != nil && !azure.ResourceNotFound(err) {
return errors.Wrapf(err, "failed to get existing agent pool")
}
existingPool, ok := existingSpec.(containerservice.AgentPool)
if !ok {
return errors.New("expected agent pool specification")
}

// For updates, we want to pass whatever we find in the existing
// cluster, normalized to reflect the input we originally provided.
// AKS will populate defaults and read-only values, which we want
// to strip/clean to match what we expect.
isCreate := azure.ResourceNotFound(err)
if isCreate {
err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
if err != nil {
return fmt.Errorf("failed to create or update agent pool, %#+v", err)
}
} else {
// Normalize individual agent pools to diff in case we need to update
existingProfile := containerservice.AgentPool{
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
VMSize: existingPool.ManagedClusterAgentPoolProfileProperties.VMSize,
OsDiskSizeGB: existingPool.ManagedClusterAgentPoolProfileProperties.OsDiskSizeGB,
Count: existingPool.ManagedClusterAgentPoolProfileProperties.Count,
Type: containerservice.VirtualMachineScaleSets,
OrchestratorVersion: existingPool.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion,
},
}

// Diff and check if we require an update
diff := cmp.Diff(profile, existingProfile)
if diff != "" {
klog.V(2).Infof("Update required (+new -old):\n%s", diff)
err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
if err != nil {
return fmt.Errorf("failed to create or update agent pool, %#+v", err.Error())
}
} else {
klog.V(2).Infof("Normalized and desired agent pool matched, no update needed")
}
}

return nil
}

// Delete deletes the agent pool with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
agentPoolSpec, ok := spec.(*Spec)
if !ok {
return errors.New("expected agent pool specification")
}

klog.V(2).Infof("deleting agent pool %s ", agentPoolSpec.Name)
err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
if err != nil {
if azure.ResourceNotFound(err) {
// already deleted
return nil
}
return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup)
}

klog.V(2).Infof("Successfully deleted agent pool %s ", agentPoolSpec.Name)
return nil
}
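
For context, a minimal caller sketch (not from this PR) showing how Reconcile might be driven; ensureAgentPool and the literal values are hypothetical, the Service constructor lives outside this file, and the parameter is typed against the GetterService interface this service satisfies:

package example

import (
	"context"

	azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
	"sigs.k8s.io/cluster-api-provider-azure/cloud/services/agentpools"
)

// ensureAgentPool builds an agent pool Spec from desired state and lets the
// service create or update the AKS node pool idempotently.
func ensureAgentPool(ctx context.Context, svc azure.GetterService) error {
	spec := &agentpools.Spec{
		Name:          "pool0", // example values; real ones come from the MachinePool CRDs
		ResourceGroup: "my-resource-group",
		Cluster:       "my-managed-cluster",
		SKU:           "Standard_D2s_v3",
		Replicas:      3,
		OSDiskSizeGB:  30,
	}
	return svc.Reconcile(ctx, spec)
}
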