Skip to content

Commit

Permalink
Merge branch 'main' into dependabot/go_modules/otel-dependencies-034d…
Browse files Browse the repository at this point in the history
…251fb6
  • Loading branch information
michalpristas authored Apr 22, 2024
2 parents d003ca6 + d48ac97 commit eb5ae12
Show file tree
Hide file tree
Showing 23 changed files with 2,860 additions and 330 deletions.
2 changes: 2 additions & 0 deletions .buildkite/pipeline.elastic-agent-package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,8 @@ steps:
depends_on: package
agents:
provider: "gcp"
machineType: "n2-standard-8"
diskSizeGb: 250
env:
DRA_PROJECT_ID: "elastic-agent-package"
DRA_PROJECT_ARTIFACT_ID: "agent-package"
Expand Down
2 changes: 1 addition & 1 deletion .buildkite/scripts/steps/integration_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ MAGE_SUBTARGET="${3:-""}"
# Override the agent package version using a string with format <major>.<minor>.<patch>
# NOTE: use only after version bump when the new version is not yet available, for example:
# OVERRIDE_AGENT_PACKAGE_VERSION="8.10.3" otherwise OVERRIDE_AGENT_PACKAGE_VERSION="".
OVERRIDE_AGENT_PACKAGE_VERSION=""
OVERRIDE_AGENT_PACKAGE_VERSION="8.14.0"

if [[ -n "$OVERRIDE_AGENT_PACKAGE_VERSION" ]]; then
OVERRIDE_TEST_AGENT_VERSION=${OVERRIDE_AGENT_PACKAGE_VERSION}"-SNAPSHOT"
Expand Down
13 changes: 13 additions & 0 deletions .mergify.yml
Original file line number Diff line number Diff line change
Expand Up @@ -306,3 +306,16 @@ pull_request_rules:
labels:
- "backport"
title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}"
- name: backport patches to 8.14 branch
conditions:
- merged
- label=backport-v8.14.0
actions:
backport:
assignees:
- "{{ author }}"
branches:
- "8.14"
labels:
- "backport"
title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}"
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user’s deployment.
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: feature

# Change summary; a 80ish characters long description of the change.
summary: Introduce isolate units for input spec.

# Long description; in case the summary is not enough to describe the change
# this field accommodate a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
description: |
Introduce a new flag in the spec that allows disabling the grouping of inputs of the same type into a single component.
If that flag is activated, the inputs won't be grouped; instead, a separate component is created for each input, running the units for that particular input.
# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
component: elastic-agent

# PR URL; optional; the PR number that added the changeset.
# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
pr: https://github.com/elastic/elastic-agent/pull/4476

# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
# If not present is automatically filled by the tooling with the issue linked to the PR number.
issue: https://github.com/elastic/security-team/issues/8669
32 changes: 32 additions & 0 deletions changelog/fragments/1712583231-leader-election-issue.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user’s deployment.
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: bug-fix

# Change summary; a 80ish characters long description of the change.
summary: Fix issue where kubernetes_leaderelection provider would not try to reacquire the lease once lost

# Long description; in case the summary is not enough to describe the change
# this field accommodate a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
#description:

# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
component: elastic-agent

# PR URL; optional; the PR number that added the changeset.
# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
pr: https://github.com/elastic/elastic-agent/pull/4542

# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
# If not present is automatically filled by the tooling with the issue linked to the PR number.
issue: https://github.com/elastic/elastic-agent/issues/4543
2 changes: 1 addition & 1 deletion deploy/kubernetes/elastic-agent-managed-kubernetes.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: elastic-agent
image: docker.elastic.co/beats/elastic-agent:8.14.0
image: docker.elastic.co/beats/elastic-agent:8.15.0
env:
# Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode
- name: FLEET_ENROLL
Expand Down
4 changes: 2 additions & 2 deletions deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -698,13 +698,13 @@ spec:
# - -c
# - >-
# mkdir -p /etc/elastic-agent/inputs.d &&
# wget -O - https://github.com/elastic/elastic-agent/archive/8.14.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.14/deploy/kubernetes/elastic-agent-standalone/templates.d"
# wget -O - https://github.com/elastic/elastic-agent/archive/8.15.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-8.15/deploy/kubernetes/elastic-agent-standalone/templates.d"
# volumeMounts:
# - name: external-inputs
# mountPath: /etc/elastic-agent/inputs.d
containers:
- name: elastic-agent-standalone
image: docker.elastic.co/beats/elastic-agent:8.14.0
image: docker.elastic.co/beats/elastic-agent:8.15.0
args: ["-c", "/etc/elastic-agent/agent.yml", "-e"]
env:
# The basic authentication username used to connect to Elasticsearch
Expand Down
114 changes: 112 additions & 2 deletions internal/pkg/agent/application/coordinator/coordinator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,19 @@ var (
},
},
}
	// fakeIsolatedUnitsInputSpec is the spec for the "fake-isolated-units" test
	// component. IsolateUnits disables grouping of inputs of this type, so each
	// input runs in its own component (one component per input).
	fakeIsolatedUnitsInputSpec = component.InputSpec{
		Name:      "fake-isolated-units",
		Platforms: []string{fmt.Sprintf("%s/%s", goruntime.GOOS, goruntime.GOARCH)},
		Shippers:  []string{"fake-shipper"},
		Command: &component.CommandSpec{
			Timeouts: component.CommandTimeoutSpec{
				Checkin: 30 * time.Second,
				Restart: 10 * time.Millisecond, // quick restart during tests
				Stop:    30 * time.Second,
			},
		},
		IsolateUnits: true,
	}
fakeShipperSpec = component.ShipperSpec{
Name: "fake-shipper",
Platforms: []string{fmt.Sprintf("%s/%s", goruntime.GOOS, goruntime.GOARCH)},
Expand Down Expand Up @@ -547,6 +560,94 @@ func TestCoordinator_StateSubscribe(t *testing.T) {
require.NoError(t, err)
}

// TestCoordinator_StateSubscribeIsolatedUnits verifies that when the input
// spec sets IsolateUnits, the coordinator runs each input in its own
// component and both inputs eventually report a healthy unit.
func TestCoordinator_StateSubscribeIsolatedUnits(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	coordErrCh := make(chan error)
	coord, cfgMgr, varsMgr := createCoordinator(t, ctx, WithComponentInputSpec(fakeIsolatedUnitsInputSpec))
	go func() {
		runErr := coord.Run(ctx)
		if errors.Is(runErr, context.Canceled) {
			// cancellation is the expected shutdown path, not a failure
			runErr = nil
		}
		coordErrCh <- runErr
	}()

	watchErrCh := make(chan error)
	go func() {
		subCtx, subCancel := context.WithTimeout(ctx, 30*time.Second)
		defer subCancel()

		stateCh := coord.StateSubscribe(subCtx, 32)
		for {
			select {
			case <-subCtx.Done():
				watchErrCh <- subCtx.Err()
				return
			case state := <-stateCh:
				t.Logf("%+v", state)
				// Keep waiting until all three components are present.
				if len(state.Components) != 3 {
					continue
				}
				comp0 := getComponentState(state.Components, "fake-isolated-units-default-fake-isolated-units-0")
				comp1 := getComponentState(state.Components, "fake-isolated-units-default-fake-isolated-units-1")
				if comp0 == nil || comp1 == nil {
					continue
				}
				unit0, ok0 := comp0.State.Units[runtime.ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-isolated-units-default-fake-isolated-units-0-unit"}]
				unit1, ok1 := comp1.State.Units[runtime.ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-isolated-units-default-fake-isolated-units-1-unit"}]
				if !ok0 || !ok1 {
					continue
				}
				healthy0 := unit0.State == client.UnitStateHealthy && unit0.Message == "Healthy From Fake Isolated Units 0 Config"
				healthy1 := unit1.State == client.UnitStateHealthy && unit1.Message == "Healthy From Fake Isolated Units 1 Config"
				if healthy0 && healthy1 {
					watchErrCh <- nil
					return
				}
			}
		}
	}()

	// no vars used by the config
	varsMgr.Vars(ctx, []*transpiler.Vars{{}})

	// set the configuration to run two inputs of the isolated-units fake type
	cfg, err := config.NewConfigFrom(map[string]interface{}{
		"outputs": map[string]interface{}{
			"default": map[string]interface{}{
				"type": "fake-action-output",
				"shipper": map[string]interface{}{
					"enabled": true,
				},
			},
		},
		"inputs": []interface{}{
			map[string]interface{}{
				"id":         "fake-isolated-units-0",
				"type":       "fake-isolated-units",
				"use_output": "default",
				"state":      client.UnitStateHealthy,
				"message":    "Healthy From Fake Isolated Units 0 Config",
			},
			map[string]interface{}{
				"id":         "fake-isolated-units-1",
				"type":       "fake-isolated-units",
				"use_output": "default",
				"state":      client.UnitStateHealthy,
				"message":    "Healthy From Fake Isolated Units 1 Config",
			},
		},
	})
	require.NoError(t, err)
	cfgMgr.Config(ctx, cfg)

	require.NoError(t, <-watchErrCh)
	cancel()

	require.NoError(t, <-coordErrCh)
}

func TestCollectManagerErrorsTimeout(t *testing.T) {
handlerChan, _, _, _, _ := setupManagerShutdownChannels(time.Millisecond)
// Don't send anything to the shutdown channels, causing a timeout
Expand Down Expand Up @@ -757,6 +858,7 @@ func TestCoordinator_UpgradeDetails(t *testing.T) {
// createCoordinatorOpts carries the configurable pieces used by
// createCoordinator when building a Coordinator for tests; populate it via
// CoordinatorOpt functions rather than directly.
type createCoordinatorOpts struct {
	// managed toggles managed-mode coordinator setup — TODO confirm where it is consumed
	managed bool
	// upgradeManager overrides the coordinator's UpgradeManager when set
	upgradeManager UpgradeManager
	// compInputSpec is the input spec used for the fake test component
	compInputSpec component.InputSpec
}

// CoordinatorOpt mutates createCoordinatorOpts; pass one or more of these to
// createCoordinator to customize the test coordinator being built.
type CoordinatorOpt func(o *createCoordinatorOpts)
Expand All @@ -773,13 +875,21 @@ func WithUpgradeManager(upgradeManager UpgradeManager) CoordinatorOpt {
}
}

// WithComponentInputSpec returns a CoordinatorOpt that overrides the input
// spec used for the fake component when building a test coordinator.
func WithComponentInputSpec(spec component.InputSpec) CoordinatorOpt {
	return func(opts *createCoordinatorOpts) {
		opts.compInputSpec = spec
	}
}

// createCoordinator creates a coordinator that using a fake config manager and a fake vars manager.
//
// The runtime specifications is set up to use both the fake component and fake shipper.
func createCoordinator(t *testing.T, ctx context.Context, opts ...CoordinatorOpt) (*Coordinator, *fakeConfigManager, *fakeVarsManager) {
t.Helper()

o := &createCoordinatorOpts{}
o := &createCoordinatorOpts{
compInputSpec: fakeInputSpec,
}
for _, opt := range opts {
opt(o)
}
Expand All @@ -793,7 +903,7 @@ func createCoordinator(t *testing.T, ctx context.Context, opts ...CoordinatorOpt
InputType: "fake",
BinaryName: "",
BinaryPath: testBinary(t, "component"),
Spec: fakeInputSpec,
Spec: o.compInputSpec,
}
shipperSpec := component.ShipperRuntimeSpec{
ShipperType: "fake-shipper",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,12 @@ import (
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sclient "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"

"github.com/elastic/elastic-agent-autodiscover/kubernetes"

"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
"github.com/elastic/elastic-agent/internal/pkg/composable"
Expand All @@ -22,6 +24,8 @@ import (
"github.com/elastic/elastic-agent/pkg/core/logger"
)

// leaderElectorPrefix prefixes the leader-election identity, which is built
// from either the pod name or the agent ID.
const leaderElectorPrefix = "elastic-agent-leader-"

// init registers this context provider under the name
// "kubernetes_leaderelection" so it can be referenced from configuration.
func init() {
	composable.Providers.MustAddContextProvider("kubernetes_leaderelection", ContextProviderBuilder)
}
Expand All @@ -45,11 +49,15 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed boo
return &contextProvider{logger, &cfg, nil}, nil
}

// getK8sClientFunc builds the Kubernetes client used by the provider. It is a
// package-level variable (rather than a direct call) so tests can overwrite
// it with a fake client.
var getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) {
	return kubernetes.GetKubernetesClient(kubeconfig, opt)
}

// Run runs the leaderelection provider.
func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProviderComm) error {
client, err := kubernetes.GetKubernetesClient(p.config.KubeConfig, p.config.KubeClientOptions)
client, err := getK8sClientFunc(p.config.KubeConfig, p.config.KubeClientOptions)
if err != nil {
// info only; return nil (do nothing)
p.logger.Debugf("Kubernetes leaderelection provider skipped, unable to connect: %s", err)
return nil
}
Expand All @@ -61,9 +69,9 @@ func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProvider
var id string
podName, found := os.LookupEnv("POD_NAME")
if found {
id = "elastic-agent-leader-" + podName
id = leaderElectorPrefix + podName
} else {
id = "elastic-agent-leader-" + agentInfo.AgentID()
id = leaderElectorPrefix + agentInfo.AgentID()
}

ns, err := kubernetes.InClusterNamespace()
Expand Down Expand Up @@ -104,9 +112,14 @@ func (p *contextProvider) Run(ctx context.Context, comm corecomp.ContextProvider
p.logger.Errorf("error while creating Leader Elector: %v", err)
}
p.logger.Debugf("Starting Leader Elector")
le.Run(comm)
p.logger.Debugf("Stopped Leader Elector")
return comm.Err()

for {
le.Run(ctx)
if ctx.Err() != nil {
p.logger.Debugf("Stopped Leader Elector")
return comm.Err()
}
}
}

func (p *contextProvider) startLeading(comm corecomp.ContextProviderComm) {
Expand Down
Loading

0 comments on commit eb5ae12

Please sign in to comment.