From eda2d142d5a54549a72a7ca1dfc024c9767282ec Mon Sep 17 00:00:00 2001
From: Lucas TESSON
Date: Sat, 25 Jan 2025 22:30:47 +0100
Subject: [PATCH] ci: add kind testing

---
 .github/workflows/kind.yaml                  | 110 ++++++++++++++++++
 deploy/Pulumi.yaml                           |   8 ++
 deploy/main.go                               |  15 ++-
 deploy/services/chall-manager.go             | 102 +++++++++++++---
 .../services/parts/chall-manager-janitor.go  |  40 +++++--
 deploy/services/parts/chall-manager.go       |  98 ++++++++++------
 deploy/services/parts/etcd.go                |  30 +++--
 go.work.sum                                  |   2 +
 8 files changed, 331 insertions(+), 74 deletions(-)
 create mode 100644 .github/workflows/kind.yaml

diff --git a/.github/workflows/kind.yaml b/.github/workflows/kind.yaml
new file mode 100644
index 0000000..273edc2
--- /dev/null
+++ b/.github/workflows/kind.yaml
@@ -0,0 +1,110 @@
+name: Create Cluster with Registry
+
+on:
+  push: {}
+
+jobs:
+  setup-and-deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Set up Docker registry
+        run: |
+          docker network create kind || true
+          docker run -d --network kind --name registry -p 5000:5000 registry:2
+
+      - name: Write config file
+        run: |
+          cat <<EOF > kind-config.yaml
+          apiVersion: kind.x-k8s.io/v1alpha4
+          kind: Cluster
+          containerdConfigPatches:
+          - |
+            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
+              endpoint = ["http://registry:5000"]
+
+          kubeadmConfigPatches:
+          - |
+            kind: ClusterConfiguration
+            apiServer:
+              extraArgs:
+                "service-node-port-range": "30000-30005"
+
+          nodes:
+          - role: control-plane
+            extraPortMappings:
+            - containerPort: 80
+              hostPort: 80
+            - containerPort: 30000
+              hostPort: 30000
+            - containerPort: 30001
+              hostPort: 30001
+            - containerPort: 30002
+              hostPort: 30002
+            - containerPort: 30003
+              hostPort: 30003
+            - containerPort: 30004
+              hostPort: 30004
+            - containerPort: 30005
+              hostPort: 30005
+          EOF
+
+      - name: Set up Kind cluster
+        uses: helm/kind-action@v1.7.0
+        with:
+          version: v0.20.0
+          config: kind-config.yaml
+          cluster_name: kind
+        env:
+          KIND_EXPERIMENTAL_DOCKER_NETWORK: kind
+
+      - name: Build and push CM
+        run: |
+          docker build \
+            -t localhost:5000/ctferio/chall-manager:${{ github.sha }} \
+            -f Dockerfile.chall-manager \
+            .
+          docker push localhost:5000/ctferio/chall-manager:${{ github.sha }}
+
+      - name: Build and push CMJ
+        run: |
+          docker build \
+            -t localhost:5000/ctferio/chall-manager-janitor:${{ github.sha }} \
+            -f Dockerfile.chall-manager-janitor \
+            .
+ docker push localhost:5000/ctferio/chall-manager-janitor:${{ github.sha }} + + - name: Install Pulumi + uses: pulumi/actions@v4 + - name: Prepare environment + run: | + pulumi login --local + kubectl create ns cm-in-ci + + - name: Romeo environment + id: env + uses: ctfer-io/romeo/environment@1c42bc54350537e321f25b78acd8b2525a57f654 + with: + kubeconfig: ~/.kube/config + namespace: cm-in-ci + storage-class-name: standard + + - name: Configure stack + run: | + export PULUMI_CONFIG_PASSPHRASE="" + + cd deploy + pulumi stack init dev + pulumi config set private-registry "localhost:5000/" + pulumi config set tag ${{ github.sha }} + pulumi config set --path romeo.claim-name + pulumi config set namespace ${{ steps.env.outputs.namespace }} + pulumi config set pvc-access-mode ReadWriteOnce + pulumi config set expose true + pulumi up -y + + URL="http://$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' kind-control-plane):$(pulumi stack output exposed_port)" + echo $URL + curl "${URL}/healthcheck" diff --git a/deploy/Pulumi.yaml b/deploy/Pulumi.yaml index 482fcab..bb5508a 100644 --- a/deploy/Pulumi.yaml +++ b/deploy/Pulumi.yaml @@ -30,6 +30,14 @@ config: type: boolean description: If set to true, turns on the REST API Swagger UI. Do not activate in production. (Optional) default: false + pvc-access-mode: + type: string + description: The access mode to use for the PVC. (Optional) + default: ReadWriteMany + expose: + type: boolean + description: Whether to expose to external networking the Chall-Manager service. DO NOT TURN ON IF YOU DON'T UNDERSTAND THE IMPACT. + default: false otel.endpoint: type: string description: The OpenTelemetry Collector endpoint to set signals to. (Optional) diff --git a/deploy/main.go b/deploy/main.go index c090637..eef1fe6 100644 --- a/deploy/main.go +++ b/deploy/main.go @@ -30,9 +30,13 @@ func main() { PrivateRegistry: pulumi.String(cfg.PrivateRegistry), Replicas: pulumi.Int(cfg.Replicas), Swagger: cfg.Swagger, - EtcdReplicas: nil, - JanitorCron: nil, - Otel: nil, + PVCAccessModes: pulumi.ToStringArray([]string{ + cfg.PVCAccessMode, + }), + Expose: cfg.Expose, + EtcdReplicas: nil, + JanitorCron: nil, + Otel: nil, } if cfg.Etcd != nil { args.EtcdReplicas = pulumi.IntPtr(cfg.Etcd.Replicas) @@ -53,6 +57,7 @@ func main() { } ctx.Export("endpoint", cm.Endpoint) + ctx.Export("exposed_port", cm.ExposedPort) return nil }) @@ -67,6 +72,8 @@ type ( Replicas int `json:"replicas"` Janitor *JanitorConfig `json:"janitor"` Swagger bool `json:"swagger"` + PVCAccessMode string `json:"pvc-access-mode"` + Expose bool `json:"expose"` Otel *OtelConfig `json:"otel"` } @@ -92,6 +99,8 @@ func loadConfig(ctx *pulumi.Context) *Config { PrivateRegistry: cfg.Get("private-registry"), Replicas: cfg.GetInt("replicas"), Swagger: cfg.GetBool("swagger"), + PVCAccessMode: cfg.Get("pvc-access-mode"), + Expose: cfg.GetBool("expose"), } var etcdC EtcdConfig diff --git a/deploy/services/chall-manager.go b/deploy/services/chall-manager.go index c649f19..d081b3a 100644 --- a/deploy/services/chall-manager.go +++ b/deploy/services/chall-manager.go @@ -1,10 +1,14 @@ package services import ( + "fmt" + "strconv" "strings" "github.com/ctfer-io/chall-manager/deploy/common" "github.com/ctfer-io/chall-manager/deploy/services/parts" + corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1" + v1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1" netwv1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/networking/v1" 
"github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) @@ -19,13 +23,17 @@ type ( cm *parts.ChallManager cmj *parts.ChallManagerJanitor + // Exposure + svc *corev1.Service + // Interface & ports network policies cmToEtcd *netwv1.NetworkPolicy cmjToCm *netwv1.NetworkPolicy // Outputs - Endpoint pulumi.StringOutput + Endpoint pulumi.StringOutput + ExposedPort pulumi.IntPtrOutput } // ChallManagerArgs contains all the parametrization of a Chall-Manager @@ -47,7 +55,11 @@ type ( JanitorCron pulumi.StringPtrInput janitorCron pulumi.StringOutput - Swagger bool + // PVCAccessModes defines the access modes supported by the PVC. + PVCAccessModes pulumi.StringArrayInput + pvcAccessModes pulumi.StringArrayOutput + + Swagger, Expose bool Otel *common.OtelArgs } @@ -62,19 +74,41 @@ const ( // // It is not made to be exposed to outer world (outside of the cluster). func NewChallManager(ctx *pulumi.Context, name string, args *ChallManagerArgs, opts ...pulumi.ResourceOption) (*ChallManager, error) { + cm := &ChallManager{} + args, err := cm.validate(args) + if err != nil { + return nil, err + } + if err := ctx.RegisterComponentResource("ctfer-io:chall-manager", name, cm, opts...); err != nil { + return nil, err + } + opts = append(opts, pulumi.Parent(cm)) + if err := cm.provision(ctx, args, opts...); err != nil { + return nil, err + } + if err := cm.outputs(ctx); err != nil { + return nil, err + } + return cm, nil +} + +func (cm *ChallManager) validate(args *ChallManagerArgs) (*ChallManagerArgs, error) { if args == nil { args = &ChallManagerArgs{} } + if args.Tag == nil || args.Tag == pulumi.String("") { args.tag = pulumi.String("dev").ToStringOutput() } else { args.tag = args.Tag.ToStringPtrOutput().Elem() } + if args.JanitorCron == nil || args.JanitorCron == pulumi.String("") { args.janitorCron = pulumi.String(defaultCron).ToStringOutput() } else { args.janitorCron = args.JanitorCron.ToStringPtrOutput().Elem() } + if args.PrivateRegistry == nil { args.privateRegistry = pulumi.String("").ToStringOutput() } else { @@ -92,23 +126,22 @@ func NewChallManager(ctx *pulumi.Context, name string, args *ChallManagerArgs, o return str }).(pulumi.StringOutput) } + if args.Replicas == nil { args.replicas = pulumi.Int(1).ToIntOutput() } else { args.replicas = args.Replicas.ToIntPtrOutput().Elem() } - cm := &ChallManager{} - if err := ctx.RegisterComponentResource("ctfer-io:chall-manager", name, cm, opts...); err != nil { - return nil, err - } - opts = append(opts, pulumi.Parent(cm)) - if err := cm.provision(ctx, args, opts...); err != nil { - return nil, err + if args.PVCAccessModes == nil { + args.pvcAccessModes = pulumi.ToStringArray([]string{ + "ReadWriteMany", + }).ToStringArrayOutput() + } else { + args.pvcAccessModes = args.PVCAccessModes.ToStringArrayOutput() } - cm.outputs() - return cm, nil + return args, nil } func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, opts ...pulumi.ResourceOption) (err error) { @@ -149,9 +182,10 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o } return 1 // default replicas to 1 }).(pulumi.IntOutput), - Etcd: nil, - Swagger: args.Swagger, - Otel: nil, + Etcd: nil, + Swagger: args.Swagger, + PVCAccessModes: args.pvcAccessModes, + Otel: nil, } if args.EtcdReplicas != nil { cmArgs.Etcd = &parts.ChallManagerEtcdArgs{ @@ -172,6 +206,34 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o return } + if args.Expose { + cm.svc, err = corev1.NewService(ctx, "cm-exposed", &corev1.ServiceArgs{ + Metadata: 
v1.ObjectMetaArgs{ + Labels: cm.cm.PodLabels, + Namespace: args.Namespace, + }, + Spec: corev1.ServiceSpecArgs{ + Type: pulumi.String("NodePort"), + Selector: cm.cm.PodLabels, + Ports: corev1.ServicePortArray{ + corev1.ServicePortArgs{ + Port: cm.cm.Endpoint.ApplyT(func(edp string) int { + // On bootstrap there is no valid URL, but port is assigned + pts := strings.Split(edp, ":") + p := pts[len(pts)-1] + port, _ := strconv.Atoi(p) + fmt.Printf("port: %v\n", port) + return port + }).(pulumi.IntOutput), + }, + }, + }, + }, opts...) + if err != nil { + return + } + } + // Deploy janitor var cmjOtel *common.OtelArgs if args.Otel != nil { @@ -294,6 +356,16 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o return } -func (cm *ChallManager) outputs() { +func (cm *ChallManager) outputs(ctx *pulumi.Context) error { cm.Endpoint = cm.cm.Endpoint + if cm.svc != nil { + cm.ExposedPort = cm.svc.Spec.ApplyT(func(spec corev1.ServiceSpec) *int { + return spec.Ports[0].NodePort + }).(pulumi.IntPtrOutput) + } + + return ctx.RegisterResourceOutputs(cm, pulumi.Map{ + "endpoint": cm.Endpoint, + "exposed_port": cm.ExposedPort, + }) } diff --git a/deploy/services/parts/chall-manager-janitor.go b/deploy/services/parts/chall-manager-janitor.go index 3fd8d43..16abefc 100644 --- a/deploy/services/parts/chall-manager-janitor.go +++ b/deploy/services/parts/chall-manager-janitor.go @@ -53,19 +53,41 @@ const ( ) func NewChallManagerJanitor(ctx *pulumi.Context, name string, args *ChallManagerJanitorArgs, opts ...pulumi.ResourceOption) (*ChallManagerJanitor, error) { + cmj := &ChallManagerJanitor{} + args, err := cmj.validate(args) + if err != nil { + return nil, err + } + if err := ctx.RegisterComponentResource("ctfer-io:chall-manager:chall-manager-janitor", name, cmj, opts...); err != nil { + return nil, err + } + opts = append(opts, pulumi.Parent(cmj)) + if err := cmj.provision(ctx, args, opts...); err != nil { + return nil, err + } + if err := cmj.outputs(ctx); err != nil { + return nil, err + } + return cmj, nil +} + +func (cmj *ChallManagerJanitor) validate(args *ChallManagerJanitorArgs) (*ChallManagerJanitorArgs, error) { if args == nil { args = &ChallManagerJanitorArgs{} } + if args.Tag == nil || args.Tag == pulumi.String("") { args.tag = pulumi.String("dev").ToStringOutput() } else { args.tag = args.Tag.ToStringPtrOutput().Elem() } + if args.Cron == nil || args.Cron == pulumi.String("") { args.cron = pulumi.String(defaultCron).ToStringOutput() } else { args.cron = args.Cron.ToStringPtrOutput().Elem() } + if args.PrivateRegistry == nil || args.PrivateRegistry == pulumi.String("") { args.privateRegistry = pulumi.String("").ToStringOutput() } else { @@ -80,17 +102,7 @@ func NewChallManagerJanitor(ctx *pulumi.Context, name string, args *ChallManager }).(pulumi.StringOutput) } - cmj := &ChallManagerJanitor{} - if err := ctx.RegisterComponentResource("ctfer-io:chall-manager:chall-manager-janitor", name, cmj, opts...); err != nil { - return nil, err - } - opts = append(opts, pulumi.Parent(cmj)) - if err := cmj.provision(ctx, args, opts...); err != nil { - return nil, err - } - cmj.outputs() - - return cmj, nil + return args, nil } func (cmj *ChallManagerJanitor) provision(ctx *pulumi.Context, args *ChallManagerJanitorArgs, opts ...pulumi.ResourceOption) (err error) { @@ -171,6 +183,10 @@ func (cmj *ChallManagerJanitor) provision(ctx *pulumi.Context, args *ChallManage return } -func (cmj *ChallManagerJanitor) outputs() { +func (cmj *ChallManagerJanitor) outputs(ctx *pulumi.Context) error 
{ cmj.PodLabels = cmj.cjob.Spec.JobTemplate().Metadata().Labels() + + return ctx.RegisterResourceOutputs(cmj, pulumi.Map{ + "podLabels": cmj.PodLabels, + }) } diff --git a/deploy/services/parts/chall-manager.go b/deploy/services/parts/chall-manager.go index 4f1eec7..01a04c2 100644 --- a/deploy/services/parts/chall-manager.go +++ b/deploy/services/parts/chall-manager.go @@ -53,6 +53,10 @@ type ( // Replicas of the chall-manager instance. If not specified, default to 1. Replicas pulumi.IntPtrInput + // PVCAccessModes defines the access modes supported by the PVC. + PVCAccessModes pulumi.StringArrayInput + pvcAccessModes pulumi.StringArrayOutput + Swagger bool Etcd *ChallManagerEtcdArgs @@ -87,15 +91,35 @@ var crudVerbs = []string{ // It creates the namespace the Chall-Manager will launch the scenarios into, then all // the recommended resources for a Kubernetes-native Micro Services deployment. func NewChallManager(ctx *pulumi.Context, name string, args *ChallManagerArgs, opts ...pulumi.ResourceOption) (*ChallManager, error) { - // Validate inputs and defaults if necessary + cm := &ChallManager{} + args, err := cm.validate(args) + if err != nil { + return nil, err + } + if err := ctx.RegisterComponentResource("ctfer-io:chall-manager:chall-manager", name, cm, opts...); err != nil { + return nil, err + } + opts = append(opts, pulumi.Parent(cm)) + if err := cm.provision(ctx, args, opts...); err != nil { + return nil, err + } + if err := cm.outputs(ctx); err != nil { + return nil, err + } + return cm, nil +} + +func (cm *ChallManager) validate(args *ChallManagerArgs) (*ChallManagerArgs, error) { if args == nil { args = &ChallManagerArgs{} } + if args.Tag == nil || args.Tag == pulumi.String("") { args.tag = pulumi.String("dev").ToStringOutput() } else { args.tag = args.Tag.ToStringPtrOutput().Elem() } + if args.PrivateRegistry == nil { args.privateRegistry = pulumi.String("").ToStringOutput() } else { @@ -114,18 +138,15 @@ func NewChallManager(ctx *pulumi.Context, name string, args *ChallManagerArgs, o }).(pulumi.StringOutput) } - // Register component resource, provision and export outputs - cm := &ChallManager{} - if err := ctx.RegisterComponentResource("ctfer-io:chall-manager:chall-manager", name, cm, opts...); err != nil { - return nil, err - } - opts = append(opts, pulumi.Parent(cm)) - if err := cm.provision(ctx, args, opts...); err != nil { - return nil, err + if args.PVCAccessModes == nil { + args.pvcAccessModes = pulumi.ToStringArray([]string{ + "ReadWriteMany", + }).ToStringArrayOutput() + } else { + args.pvcAccessModes = args.PVCAccessModes.ToStringArrayOutput() } - cm.outputs() - return cm, nil + return args, nil } func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, opts ...pulumi.ResourceOption) (err error) { @@ -418,31 +439,6 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o return } - // => PersistentVolumeClaim - cm.pvc, err = corev1.NewPersistentVolumeClaim(ctx, "chall-manager-pvc", &corev1.PersistentVolumeClaimArgs{ - Metadata: metav1.ObjectMetaArgs{ - Namespace: args.Namespace, - Labels: pulumi.StringMap{ - "app.kubernetes.io/component": pulumi.String("chall-manager"), - "app.kubernetes.io/part-of": pulumi.String("chall-manager"), - }, - }, - Spec: corev1.PersistentVolumeClaimSpecArgs{ - // StorageClassName: pulumi.String("longhorn"), - AccessModes: pulumi.ToStringArray([]string{ - "ReadWriteMany", - }), - Resources: corev1.VolumeResourceRequirementsArgs{ - Requests: pulumi.ToStringMap(map[string]string{ - "storage": 
"2Gi", - }), - }, - }, - }, opts...) - if err != nil { - return - } - // => Deployment initCts := corev1.ContainerArray{} envs := corev1.EnvVarArray{ @@ -528,6 +524,29 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o } } + // => PersistentVolumeClaim + cm.pvc, err = corev1.NewPersistentVolumeClaim(ctx, "chall-manager-pvc", &corev1.PersistentVolumeClaimArgs{ + Metadata: metav1.ObjectMetaArgs{ + Namespace: args.Namespace, + Labels: pulumi.StringMap{ + "app.kubernetes.io/component": pulumi.String("chall-manager"), + "app.kubernetes.io/part-of": pulumi.String("chall-manager"), + }, + }, + Spec: corev1.PersistentVolumeClaimSpecArgs{ + AccessModes: args.PVCAccessModes, + Resources: corev1.VolumeResourceRequirementsArgs{ + Requests: pulumi.ToStringMap(map[string]string{ + "storage": "2Gi", + }), + }, + }, + }, opts...) + if err != nil { + return + } + + // => Deployment cm.dep, err = appsv1.NewDeployment(ctx, "chall-manager-deployment", &appsv1.DeploymentArgs{ Metadata: metav1.ObjectMetaArgs{ Namespace: args.Namespace, @@ -633,7 +652,12 @@ func (cm *ChallManager) provision(ctx *pulumi.Context, args *ChallManagerArgs, o return } -func (cm *ChallManager) outputs() { +func (cm *ChallManager) outputs(ctx *pulumi.Context) error { cm.PodLabels = cm.dep.Metadata.Labels() cm.Endpoint = pulumi.Sprintf("%s.%s:%d", cm.svc.Metadata.Name().Elem(), cm.svc.Metadata.Namespace().Elem(), port) + + return ctx.RegisterResourceOutputs(cm, pulumi.Map{ + "podLabels": cm.PodLabels, + "endpoint": cm.Endpoint, + }) } diff --git a/deploy/services/parts/etcd.go b/deploy/services/parts/etcd.go index 7b0dc67..5ed7f4b 100644 --- a/deploy/services/parts/etcd.go +++ b/deploy/services/parts/etcd.go @@ -31,11 +31,11 @@ type ( ) func NewEtcdCluster(ctx *pulumi.Context, name string, args *EtcdArgs, opts ...pulumi.ResourceOption) (*EtcdCluster, error) { - if args == nil { - args = &EtcdArgs{} - } - etcd := &EtcdCluster{} + args, err := etcd.validate(args) + if err != nil { + return nil, err + } if err := ctx.RegisterComponentResource("ctfer-io:chall-manager:etcd", name, etcd, opts...); err != nil { return nil, err } @@ -43,11 +43,20 @@ func NewEtcdCluster(ctx *pulumi.Context, name string, args *EtcdArgs, opts ...pu if err := etcd.provision(ctx, args, opts...); err != nil { return nil, err } - etcd.outputs() - + if err := etcd.outputs(ctx); err != nil { + return nil, err + } return etcd, nil } +func (etcd *EtcdCluster) validate(args *EtcdArgs) (*EtcdArgs, error) { + if args == nil { + args = &EtcdArgs{} + } + + return args, nil +} + func (etcd *EtcdCluster) provision(ctx *pulumi.Context, args *EtcdArgs, opts ...pulumi.ResourceOption) (err error) { etcd.rand, err = random.NewRandomString(ctx, "etcd-password", &random.RandomStringArgs{ Length: pulumi.Int(16), @@ -100,7 +109,7 @@ func (etcd *EtcdCluster) provision(ctx *pulumi.Context, args *EtcdArgs, opts ... 
return nil } -func (etcd *EtcdCluster) outputs() { +func (etcd *EtcdCluster) outputs(ctx *pulumi.Context) error { // Hardcoded values // XXX might not be sufficient etcd.PodLabels = pulumi.ToStringMap(map[string]string{ @@ -111,4 +120,11 @@ func (etcd *EtcdCluster) outputs() { // Generated values etcd.Password = etcd.rand.Result + + return ctx.RegisterResourceOutputs(etcd, pulumi.Map{ + "podLabels": etcd.PodLabels, + "endpoint": etcd.Endpoint, + "username": etcd.Username, + "password": etcd.Password, + }) } diff --git a/go.work.sum b/go.work.sum index 7753a68..2c4ed57 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1269,6 +1269,7 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1467,6 +1468,7 @@ golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=