Skip to content

Commit

Permalink
adds Red Hat entitlement functionality for on-cluster layering
Browse files Browse the repository at this point in the history
This adds the capability for BuildController to use the RHEL entitlement secrets to allow cluster admins to inject RHEL content into their builds that they are entitled to receive. This also allows the injection / consumption of content into `/etc/yum.repos.d` as well as `/etc/pki/rpm-gpg`. There are a few notes about the implementation that I would like to have at a higher level:

- Because we run rootless Buildah, we're more prone to running into SELinux complications. This makes it more difficult to directly mount the contents of `/etc/yum.repos.d`, `/etc/pki/entitlement`, and `/etc/pki/rpm-gpg` directly into the build context. With that in mind, we copy everything into a temp directory first, and then mount that temp directory into the build context as a volume.
- We also create an `emptyDir` which is mounted into the build pod at `/home/build/.local/share/containers`. It is unclear why this is necessary, but as mentioned before, I suspect that this is due to SELinux issues.
- The e2e test suite now has the capability to stream the container logs from the build pod to the filesystem as there is useful information contained within those logs if the e2e test fails.
  • Loading branch information
cheesesashimi committed Apr 18, 2024
1 parent 8201133 commit 5614c7e
Show file tree
Hide file tree
Showing 8 changed files with 785 additions and 122 deletions.
62 changes: 56 additions & 6 deletions pkg/controller/build/assets/buildah-build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@
# custom build pod.
# Trace commands (-x), exit on error (-e), error on unset variables (-u), and
# propagate pipeline failures (-o pipefail). Note: "-o" requires an option
# name; without "pipefail" it would merely print the current option settings.
set -xeuo pipefail

# Optional mountpoints for entitlement and repo content. Default each to the
# empty string so that "set -u" (enabled above) does not abort the script when
# the corresponding environment variable is not injected into the build pod.
ETC_PKI_ENTITLEMENT_MOUNTPOINT="${ETC_PKI_ENTITLEMENT_MOUNTPOINT:-}"
ETC_PKI_RPM_GPG_MOUNTPOINT="${ETC_PKI_RPM_GPG_MOUNTPOINT:-}"
ETC_YUM_REPOS_D_MOUNTPOINT="${ETC_YUM_REPOS_D_MOUNTPOINT:-}"

build_context="$HOME/context"

# Create a directory to hold our build context.
Expand All @@ -14,12 +18,58 @@ mkdir -p "$build_context/machineconfig"
cp /tmp/dockerfile/Dockerfile "$build_context"
cp /tmp/machineconfig/machineconfig.json.gz "$build_context/machineconfig/"

# Build our image using Buildah.
buildah bud \
--storage-driver vfs \
--authfile="$BASE_IMAGE_PULL_CREDS" \
--tag "$TAG" \
--file="$build_context/Dockerfile" "$build_context"
# Common arguments passed to the "buildah bud" invocation below. Volume mounts
# for optional entitlement content are appended conditionally.
build_args=(
  --log-level=DEBUG
  --storage-driver vfs
  --authfile="$BASE_IMAGE_PULL_CREDS"
  --tag "$TAG"
  --file="$build_context/Dockerfile"
)

# Mount options for every injected volume: relabel for SELinux (z), read-write (rw).
mount_opts="z,rw"

# Copies the contents of a source directory into a fresh tempdir (mounting the
# original path directly trips SELinux denials under rootless Buildah) and
# registers the tempdir as a build volume at the given destination path.
# $1: source directory on the host
# $2: destination path inside the build container
add_build_volume() {
  local src="$1"
  local dest="$2"
  local tmpdir

  tmpdir="$(mktemp -d)"
  cp -r -v "$src/." "$tmpdir"
  chmod -R 0755 "$tmpdir"
  build_args+=("--volume=$tmpdir:$dest:$mount_opts")
}

# If we have RHSM certs, copy them into a tempdir to avoid SELinux issues, and
# tell Buildah about them.
rhsm_path="/var/run/secrets/rhsm"
if [[ -d "$rhsm_path" ]]; then
  add_build_volume "$rhsm_path" "/run/secrets/rhsm"
fi

# If we have /etc/pki/entitlement certificates, commonly used with RHEL
# entitlements, copy them into a tempdir to avoid SELinux issues, and tell
# Buildah about them.
if [[ -n "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" ]] && [[ -d "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" ]]; then
  add_build_volume "$ETC_PKI_ENTITLEMENT_MOUNTPOINT" "$ETC_PKI_ENTITLEMENT_MOUNTPOINT"
fi

# If we have /etc/yum.repos.d configs, commonly used with Red Hat Satellite
# subscriptions, copy them into a tempdir to avoid SELinux issues, and tell
# Buildah about them.
if [[ -n "$ETC_YUM_REPOS_D_MOUNTPOINT" ]] && [[ -d "$ETC_YUM_REPOS_D_MOUNTPOINT" ]]; then
  add_build_volume "$ETC_YUM_REPOS_D_MOUNTPOINT" "$ETC_YUM_REPOS_D_MOUNTPOINT"
fi

# If we have /etc/pki/rpm-gpg configs, commonly used with Red Hat Satellite
# subscriptions, copy them into a tempdir to avoid SELinux issues, and tell
# Buildah about them.
if [[ -n "$ETC_PKI_RPM_GPG_MOUNTPOINT" ]] && [[ -d "$ETC_PKI_RPM_GPG_MOUNTPOINT" ]]; then
  add_build_volume "$ETC_PKI_RPM_GPG_MOUNTPOINT" "$ETC_PKI_RPM_GPG_MOUNTPOINT"
fi

# Build our image.
buildah bud "${build_args[@]}" "$build_context"

# Push our built image.
buildah push \
Expand Down
103 changes: 98 additions & 5 deletions pkg/controller/build/build_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,17 @@ import (
"github.com/openshift/machine-config-operator/internal/clients"
)

const (
// Name of the etc-pki-entitlement secret from the openshift-config-managed namespace.
etcPkiEntitlementSecretName = "etc-pki-entitlement"

// Name of the etc-pki-rpm-gpg secret.
etcPkiRpmGpgSecretName = "etc-pki-rpm-gpg"

// Name of the etc-yum-repos-d ConfigMap.
etcYumReposDConfigMapName = "etc-yum-repos-d"
)

const (
targetMachineConfigPoolLabel = "machineconfiguration.openshift.io/targetMachineConfigPool"
// TODO(zzlotnik): Is there a constant for this someplace else?
Expand Down Expand Up @@ -472,6 +483,20 @@ func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error {

ps := newPoolState(pool)

// We cannot solely rely upon the pod phase to determine whether the build
// pod is in an error state. This is because it is possible for the build
// container to enter an error state while the wait-for-done container is
// still running. The pod phase in this state will still be "Running" as
// opposed to error.
if isBuildPodError(pod) {
if err := ctrl.markBuildFailed(ps); err != nil {
return err
}

ctrl.enqueueMachineConfigPool(pool)
return nil
}

switch pod.Status.Phase {
case corev1.PodPending:
if !ps.IsBuildPending() {
Expand Down Expand Up @@ -503,6 +528,22 @@ func (ctrl *Controller) customBuildPodUpdater(pod *corev1.Pod) error {
return nil
}

// Determines if the build pod is in an error state by examining the individual
// container statuses. Returns true if a single container is in an error state.
func isBuildPodError(pod *corev1.Pod) bool {
	for _, container := range pod.Status.ContainerStatuses {
		// A container stuck waiting on its image is an error even though the
		// pod phase is not. The kubelet reports the initial pull failure as
		// ErrImagePull and subsequent retries as ImagePullBackOff; the two
		// reasons alternate, so a watcher may only ever observe the back-off
		// state. Treat both as terminal for the build.
		if w := container.State.Waiting; w != nil && (w.Reason == "ErrImagePull" || w.Reason == "ImagePullBackOff") {
			return true
		}

		// A nonzero exit code means this container failed, even if a sibling
		// container (e.g., wait-for-done) is still running and keeps the pod
		// phase at "Running".
		if t := container.State.Terminated; t != nil && t.ExitCode != 0 {
			return true
		}
	}

	return false
}

func (ctrl *Controller) handleConfigMapError(pools []*mcfgv1.MachineConfigPool, err error, key interface{}) {
klog.V(2).Infof("Error syncing configmap %v: %v", key, err)
utilruntime.HandleError(err)
Expand Down Expand Up @@ -950,17 +991,69 @@ func (ctrl *Controller) getBuildInputs(ps *poolState) (*buildInputs, error) {
return nil, fmt.Errorf("could not get MachineConfig %s: %w", currentMC, err)
}

etcPkiEntitlements, err := ctrl.getOptionalSecret(etcPkiEntitlementSecretName)
if err != nil {
return nil, err
}

etcPkiRpmGpgKeys, err := ctrl.getOptionalSecret(etcPkiRpmGpgSecretName)
if err != nil {
return nil, err
}

etcYumReposDConfigs, err := ctrl.getOptionalConfigMap(etcYumReposDConfigMapName)
if err != nil {
return nil, err
}

inputs := &buildInputs{
onClusterBuildConfig: onClusterBuildConfig,
osImageURL: osImageURL,
customDockerfiles: customDockerfiles,
pool: ps.MachineConfigPool(),
machineConfig: mc,
onClusterBuildConfig: onClusterBuildConfig,
osImageURL: osImageURL,
customDockerfiles: customDockerfiles,
pool: ps.MachineConfigPool(),
machineConfig: mc,
etcPkiEntitlementKeys: etcPkiEntitlements,
etcYumReposDConfigs: etcYumReposDConfigs,
etcPkiRpmGpgKeys: etcPkiRpmGpgKeys,
}

return inputs, nil
}

// Fetches an optional secret to inject into the build. Returns a nil error if
// the secret is not found.
func (ctrl *Controller) getOptionalSecret(secretName string) (*corev1.Secret, error) {
	secret, err := ctrl.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), secretName, metav1.GetOptions{})
	switch {
	case err == nil:
		// Found: include it in the build inputs.
		klog.Infof("Optional build secret %q found, will include in build", secretName)
		return secret, nil
	case k8serrors.IsNotFound(err):
		// Absent is not an error for an optional secret; just skip it.
		klog.Infof("Could not find optional secret %q, will not include in build", secretName)
		return nil, nil
	default:
		return nil, fmt.Errorf("could not retrieve optional secret: %s: %w", secretName, err)
	}
}

// Fetches an optional ConfigMap to inject into the build. Returns a nil error if
// the ConfigMap is not found.
func (ctrl *Controller) getOptionalConfigMap(configmapName string) (*corev1.ConfigMap, error) {
	optionalConfigMap, err := ctrl.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), configmapName, metav1.GetOptions{})
	if err == nil {
		klog.Infof("Optional build ConfigMap %q found, will include in build", configmapName)
		return optionalConfigMap, nil
	}

	if k8serrors.IsNotFound(err) {
		// Absent is not an error for an optional ConfigMap. Log phrasing kept
		// parallel with getOptionalSecret ("optional ...") for grep-ability.
		klog.Infof("Could not find optional ConfigMap %q, will not include in build", configmapName)
		return nil, nil
	}

	return nil, fmt.Errorf("could not retrieve optional ConfigMap: %s: %w", configmapName, err)
}

// Prepares all of the objects needed to perform an image build.
func (ctrl *Controller) prepareForBuild(inputs *buildInputs) (ImageBuildRequest, error) {
ibr := newImageBuildRequestFromBuildInputs(inputs)
Expand Down
Loading

0 comments on commit 5614c7e

Please sign in to comment.