Skip to content

Commit

Permalink
ci: add kind testing
Browse files Browse the repository at this point in the history
  • Loading branch information
pandatix committed Jan 28, 2025
1 parent ece85ed commit ba5c065
Show file tree
Hide file tree
Showing 8 changed files with 289 additions and 87 deletions.
73 changes: 73 additions & 0 deletions .github/workflows/kind.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# CI workflow: spins up a kind cluster wired to a local Docker registry,
# builds and pushes the chall-manager images into that registry, then
# deploys the Pulumi stack and smoke-tests the exposed healthcheck.
name: Create Cluster with Registry

on:
  push: {}

jobs:
  setup-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # The registry joins the "kind" Docker network so cluster nodes can
      # reach it by container name ("registry").
      - name: Set up Docker registry
        run: |
          docker network create kind || true
          docker run -d --network kind --name registry -p 5000:5000 registry:2

      # Mirror "localhost:5000" (as seen from inside containerd) to the
      # registry container on the shared Docker network.
      - name: Write config file
        run: |
          cat <<EOF > kind-config.yaml
          apiVersion: kind.x-k8s.io/v1alpha4
          kind: Cluster
          containerdConfigPatches:
          - |
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
              endpoint = ["http://registry:5000"]
          EOF

      - name: Set up Kind cluster
        uses: helm/kind-action@v1.7.0
        with:
          version: v0.20.0
          config: kind-config.yaml
          cluster_name: kind
        env:
          # Make kind attach its nodes to the pre-created "kind" network.
          KIND_EXPERIMENTAL_DOCKER_NETWORK: kind

      - name: Build and push CM
        run: |
          docker build \
            -t localhost:5000/ctferio/chall-manager:${{ github.sha }} \
            -f Dockerfile.chall-manager \
            .
          docker push localhost:5000/ctferio/chall-manager:${{ github.sha }}

      - name: Build and push CMJ
        run: |
          docker build \
            -t localhost:5000/ctferio/chall-manager-janitor:${{ github.sha }} \
            -f Dockerfile.chall-manager-janitor \
            .
          docker push localhost:5000/ctferio/chall-manager-janitor:${{ github.sha }}

      - name: Install Pulumi
        uses: pulumi/actions@v4

      # Deploy against the local backend (no cloud credentials needed),
      # then curl the healthcheck through the node's NodePort.
      - name: Configure stack
        run: |
          pulumi login --local
          export PULUMI_CONFIG_PASSPHRASE=""
          cd deploy
          pulumi stack init dev
          pulumi config set private-registry "localhost:5000/"
          pulumi config set tag ${{ github.sha }}
          pulumi config set no_pvc true
          pulumi config set expose true
          pulumi up -y
          docker ps -a
          URL="http://$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' kind-control-plane):$(pulumi stack output exposed_port)/healthcheck"
          echo "$URL"
          curl -v "$URL"
8 changes: 8 additions & 0 deletions deploy/Pulumi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,14 @@ config:
type: boolean
description: If set to true, turns on the REST API Swagger UI. Do not activate in production. (Optional)
default: false
no_pvc:
type: boolean
description: Whether to run without a PVC. This is not recommended for production workloads, but for CI purposes.
default: false
expose:
type: boolean
description: Whether to expose to external networking the Chall-Manager service. DO NOT TURN ON IF YOU DON'T UNDERSTAND THE IMPACT.
default: false
otel.endpoint:
type: string
description: The OpenTelemetry Collector endpoint to set signals to. (Optional)
Expand Down
7 changes: 7 additions & 0 deletions deploy/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ func main() {
PrivateRegistry: pulumi.String(cfg.PrivateRegistry),
Replicas: pulumi.Int(cfg.Replicas),
Swagger: cfg.Swagger,
NoPVC: cfg.NoPVC,
Expose: cfg.Expose,
EtcdReplicas: nil,
JanitorCron: nil,
Otel: nil,
Expand All @@ -53,6 +55,7 @@ func main() {
}

ctx.Export("endpoint", cm.Endpoint)
ctx.Export("exposed_port", cm.ExposedPort)

return nil
})
Expand All @@ -67,6 +70,8 @@ type (
Replicas int `json:"replicas"`
Janitor *JanitorConfig `json:"janitor"`
Swagger bool `json:"swagger"`
NoPVC bool `json:"no_pvc"`
Expose bool `json:"expose"`
Otel *OtelConfig `json:"otel"`
}

Expand All @@ -92,6 +97,8 @@ func loadConfig(ctx *pulumi.Context) *Config {
PrivateRegistry: cfg.Get("private-registry"),
Replicas: cfg.GetInt("replicas"),
Swagger: cfg.GetBool("swagger"),
NoPVC: cfg.GetBool("no_pvc"),
Expose: cfg.GetBool("expose"),
}

var etcdC EtcdConfig
Expand Down
88 changes: 74 additions & 14 deletions deploy/services/chall-manager.go
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
package services

import (
"fmt"
"strconv"
"strings"

"github.com/ctfer-io/chall-manager/deploy/common"
"github.com/ctfer-io/chall-manager/deploy/services/parts"
corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
v1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
netwv1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/networking/v1"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
Expand All @@ -19,13 +23,17 @@ type (
cm *parts.ChallManager
cmj *parts.ChallManagerJanitor

// Exposure
svc *corev1.Service

// Interface & ports network policies
cmToEtcd *netwv1.NetworkPolicy
cmjToCm *netwv1.NetworkPolicy

// Outputs

Endpoint pulumi.StringOutput
Endpoint pulumi.StringOutput
ExposedPort pulumi.IntPtrOutput
}

// ChallManagerArgs contains all the parametrization of a Chall-Manager
Expand All @@ -47,7 +55,7 @@ type (
JanitorCron pulumi.StringPtrInput
janitorCron pulumi.StringOutput

Swagger bool
Swagger, NoPVC, Expose bool

Otel *common.OtelArgs
}
Expand All @@ -62,19 +70,41 @@ const (
//
// It is not made to be exposed to outer world (outside of the cluster).
// NewChallManager registers a Chall-Manager component resource and
// provisions all of its parts (core deployment, janitor, network
// policies and the optional exposure Service), then wires its outputs.
// It returns the component, or the first error encountered.
func NewChallManager(ctx *pulumi.Context, name string, args *ChallManagerArgs, opts ...pulumi.ResourceOption) (*ChallManager, error) {
	cm := &ChallManager{}

	// Normalize/default the arguments before registering anything.
	validated, err := cm.validate(args)
	if err != nil {
		return nil, err
	}

	err = ctx.RegisterComponentResource("ctfer-io:chall-manager", name, cm, opts...)
	if err != nil {
		return nil, err
	}

	// All children are parented to this component resource.
	opts = append(opts, pulumi.Parent(cm))

	if err = cm.provision(ctx, validated, opts...); err != nil {
		return nil, err
	}
	if err = cm.outputs(ctx); err != nil {
		return nil, err
	}
	return cm, nil
}

func (cm *ChallManager) validate(args *ChallManagerArgs) (*ChallManagerArgs, error) {
if args == nil {
args = &ChallManagerArgs{}
}

if args.Tag == nil || args.Tag == pulumi.String("") {
args.tag = pulumi.String("dev").ToStringOutput()
} else {
args.tag = args.Tag.ToStringPtrOutput().Elem()
}

if args.JanitorCron == nil || args.JanitorCron == pulumi.String("") {
args.janitorCron = pulumi.String(defaultCron).ToStringOutput()
} else {
args.janitorCron = args.JanitorCron.ToStringPtrOutput().Elem()
}

if args.PrivateRegistry == nil {
args.privateRegistry = pulumi.String("").ToStringOutput()
} else {
Expand All @@ -92,23 +122,14 @@ func NewChallManager(ctx *pulumi.Context, name string, args *ChallManagerArgs, o
return str
}).(pulumi.StringOutput)
}

if args.Replicas == nil {
args.replicas = pulumi.Int(1).ToIntOutput()
} else {
args.replicas = args.Replicas.ToIntPtrOutput().Elem()
}

cm := &ChallManager{}
if err := ctx.RegisterComponentResource("ctfer-io:chall-manager", name, cm, opts...); err != nil {
return nil, err
}
opts = append(opts, pulumi.Parent(cm))
if err := cm.provision(ctx, args, opts...); err != nil {
return nil, err
}
cm.outputs()

return cm, nil
return args, nil
}

func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, opts ...pulumi.ResourceOption) (err error) {
Expand Down Expand Up @@ -151,6 +172,7 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o
}).(pulumi.IntOutput),
Etcd: nil,
Swagger: args.Swagger,
NoPVC: args.NoPVC,
Otel: nil,
}
if args.EtcdReplicas != nil {
Expand All @@ -172,6 +194,34 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o
return
}

if args.Expose {
cm.svc, err = corev1.NewService(ctx, "cm-exposed", &corev1.ServiceArgs{
Metadata: v1.ObjectMetaArgs{
Labels: cm.cm.PodLabels,
Namespace: args.Namespace,
},
Spec: corev1.ServiceSpecArgs{
Type: pulumi.String("NodePort"),
Selector: cm.cm.PodLabels,
Ports: corev1.ServicePortArray{
corev1.ServicePortArgs{
Port: cm.cm.Endpoint.ApplyT(func(edp string) int {
// On bootstrap there is no valid URL, but port is assigned
pts := strings.Split(edp, ":")
p := pts[len(pts)-1]
port, _ := strconv.Atoi(p)
fmt.Printf("port: %v\n", port)
return port
}).(pulumi.IntOutput),
},
},
},
}, opts...)
if err != nil {
return
}
}

// Deploy janitor
var cmjOtel *common.OtelArgs
if args.Otel != nil {
Expand Down Expand Up @@ -294,6 +344,16 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o
return
}

func (cm *ChallManager) outputs() {
func (cm *ChallManager) outputs(ctx *pulumi.Context) error {
cm.Endpoint = cm.cm.Endpoint
if cm.svc != nil {
cm.ExposedPort = cm.svc.Spec.ApplyT(func(spec corev1.ServiceSpec) *int {
return spec.Ports[0].NodePort
}).(pulumi.IntPtrOutput)
}

return ctx.RegisterResourceOutputs(cm, pulumi.Map{
"endpoint": cm.Endpoint,
"exposed_port": cm.ExposedPort,
})
}
40 changes: 28 additions & 12 deletions deploy/services/parts/chall-manager-janitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,19 +53,41 @@ const (
)

// NewChallManagerJanitor registers a Chall-Manager-Janitor component
// resource, provisions its CronJob, and wires its outputs.
// It returns the component, or the first error encountered.
func NewChallManagerJanitor(ctx *pulumi.Context, name string, args *ChallManagerJanitorArgs, opts ...pulumi.ResourceOption) (*ChallManagerJanitor, error) {
	cmj := &ChallManagerJanitor{}

	// Normalize/default the arguments before registering anything.
	validated, err := cmj.validate(args)
	if err != nil {
		return nil, err
	}

	err = ctx.RegisterComponentResource("ctfer-io:chall-manager:chall-manager-janitor", name, cmj, opts...)
	if err != nil {
		return nil, err
	}

	// All children are parented to this component resource.
	opts = append(opts, pulumi.Parent(cmj))

	if err = cmj.provision(ctx, validated, opts...); err != nil {
		return nil, err
	}
	if err = cmj.outputs(ctx); err != nil {
		return nil, err
	}
	return cmj, nil
}

func (cmj *ChallManagerJanitor) validate(args *ChallManagerJanitorArgs) (*ChallManagerJanitorArgs, error) {
if args == nil {
args = &ChallManagerJanitorArgs{}
}

if args.Tag == nil || args.Tag == pulumi.String("") {
args.tag = pulumi.String("dev").ToStringOutput()
} else {
args.tag = args.Tag.ToStringPtrOutput().Elem()
}

if args.Cron == nil || args.Cron == pulumi.String("") {
args.cron = pulumi.String(defaultCron).ToStringOutput()
} else {
args.cron = args.Cron.ToStringPtrOutput().Elem()
}

if args.PrivateRegistry == nil || args.PrivateRegistry == pulumi.String("") {
args.privateRegistry = pulumi.String("").ToStringOutput()
} else {
Expand All @@ -80,17 +102,7 @@ func NewChallManagerJanitor(ctx *pulumi.Context, name string, args *ChallManager
}).(pulumi.StringOutput)
}

cmj := &ChallManagerJanitor{}
if err := ctx.RegisterComponentResource("ctfer-io:chall-manager:chall-manager-janitor", name, cmj, opts...); err != nil {
return nil, err
}
opts = append(opts, pulumi.Parent(cmj))
if err := cmj.provision(ctx, args, opts...); err != nil {
return nil, err
}
cmj.outputs()

return cmj, nil
return args, nil
}

func (cmj *ChallManagerJanitor) provision(ctx *pulumi.Context, args *ChallManagerJanitorArgs, opts ...pulumi.ResourceOption) (err error) {
Expand Down Expand Up @@ -171,6 +183,10 @@ func (cmj *ChallManagerJanitor) provision(ctx *pulumi.Context, args *ChallManage
return
}

func (cmj *ChallManagerJanitor) outputs() {
func (cmj *ChallManagerJanitor) outputs(ctx *pulumi.Context) error {
cmj.PodLabels = cmj.cjob.Spec.JobTemplate().Metadata().Labels()

return ctx.RegisterResourceOutputs(cmj, pulumi.Map{
"podLabels": cmj.PodLabels,
})
}
Loading

0 comments on commit ba5c065

Please sign in to comment.