
Commit

fix rebase

skonto committed Apr 11, 2024
1 parent 7835476 commit 3b7524c

Showing 7 changed files with 27 additions and 47 deletions.
5 changes: 0 additions & 5 deletions pkg/apis/autoscaling/v1alpha1/pa_lifecycle.go
@@ -225,11 +225,6 @@ func (pas *PodAutoscalerStatus) MarkSKSNotReady(mes string) {
podCondSet.Manage(pas).MarkUnknown(PodAutoscalerConditionSKSReady, "NotReady", mes)
}

// MarkNotReady marks the PA condition that denotes it is not yet ready.
func (pas *PodAutoscalerStatus) MarkNotReady(reason, mes string) {
podCondSet.Manage(pas).MarkUnknown(PodAutoscalerConditionReady, reason, mes)
}

// GetCondition gets the condition `t`.
func (pas *PodAutoscalerStatus) GetCondition(t apis.ConditionType) *apis.Condition {
return podCondSet.Manage(pas).GetCondition(t)
15 changes: 6 additions & 9 deletions pkg/apis/serving/v1/revision_lifecycle.go
@@ -18,6 +18,7 @@ package v1

import (
"fmt"
"log"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -168,24 +169,20 @@ func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentS
}
}

func (rs *RevisionStatus) PropagateDeploymentAvailabilityStatusIfFalse(original *appsv1.DeploymentStatus) {
func (rs *RevisionStatus) PropagateDeploymentAvailabilityWithContainerStatus(original *appsv1.DeploymentStatus, reason, message string) bool {
m := revisionCondSet.Manage(rs)
avCond := m.GetCondition(RevisionConditionResourcesAvailable)

// Skip if set for other reasons
if avCond.Status == corev1.ConditionFalse {
return
}

for _, cond := range original.Conditions {
switch cond.Type {
case appsv1.DeploymentAvailable:
switch cond.Status {
case corev1.ConditionFalse:
m.MarkFalse(RevisionConditionResourcesAvailable, cond.Reason, cond.Message)
log.Printf("marking revision resource unavailable because deployment is not available with: %s: %s\n", reason, message)
m.MarkFalse(RevisionConditionResourcesAvailable, reason, message)
return true
}
}
}
return false
}

// PropagateAutoscalerStatus propagates autoscaler's status to the revision's status.
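For context, a minimal self-contained sketch (not part of this commit) of the underlying pattern the reworked helper builds on: walking a Deployment's status conditions and reacting when the Available condition is explicitly False. The `availableIsFalse` helper and the example status below are illustrative only.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// availableIsFalse reports whether the Deployment's Available condition is
// explicitly False, along with the reason/message Kubernetes recorded for it.
func availableIsFalse(ds *appsv1.DeploymentStatus) (bool, string, string) {
	for _, cond := range ds.Conditions {
		if cond.Type == appsv1.DeploymentAvailable && cond.Status == corev1.ConditionFalse {
			return true, cond.Reason, cond.Message
		}
	}
	return false, "", ""
}

func main() {
	ds := &appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:    appsv1.DeploymentAvailable,
			Status:  corev1.ConditionFalse,
			Reason:  "MinimumReplicasUnavailable",
			Message: "Deployment does not have minimum availability.",
		}},
	}
	if notAvailable, reason, msg := availableIsFalse(ds); notAvailable {
		fmt.Printf("deployment unavailable: %s: %s\n", reason, msg)
	}
}
```

The change above layers a caller-supplied reason/message (the waiting container's) on top of this check and returns a bool so the reconciler can decide whether to requeue.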
18 changes: 14 additions & 4 deletions pkg/reconciler/revision/reconcile_resources.go
@@ -19,6 +19,7 @@ package revision
import (
"context"
"fmt"
"time"

"go.uber.org/zap"
"knative.dev/pkg/tracker"
@@ -32,6 +33,7 @@ import (

networkingApi "knative.dev/networking/pkg/apis/networking"
"knative.dev/networking/pkg/certificates"
"knative.dev/pkg/controller"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/kmp"
"knative.dev/pkg/logging"
@@ -43,6 +45,8 @@ import (
resourcenames "knative.dev/serving/pkg/reconciler/revision/resources/names"
)

const requeuePeriodForProgressionChecking = 1 * time.Second

func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision) error {
ns := rev.Namespace
deploymentName := resourcenames.Deployment(rev)
@@ -72,7 +76,10 @@ func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision)
return fmt.Errorf("failed to update deployment %q: %w", deploymentName, err)
}

rev.Status.PropagateDeploymentStatus(&deployment.Status)
// When we are scaling down we want to keep the error that we might have seen before
if *deployment.Spec.Replicas > 0 {
rev.Status.PropagateDeploymentStatus(&deployment.Status)
}

// If a container keeps crashing (no active pods in the deployment although we want some)
if *deployment.Spec.Replicas > 0 && deployment.Status.AvailableReplicas == 0 {
@@ -109,10 +116,13 @@ func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision)
}
} else if w := status.State.Waiting; w != nil {
if hasDeploymentTimedOut(deployment) {
logger.Infof("marking resources unavailable with: %s: %s", w.Reason, w.Message)
logger.Infof("marking revision resources unavailable with: %s: %s", w.Reason, w.Message)
rev.Status.MarkResourcesAvailableFalse(w.Reason, w.Message)
} else {
rev.Status.PropagateDeploymentAvailabilityStatusIfFalse(&deployment.Status)
break
}
if shouldRequeue := rev.Status.PropagateDeploymentAvailabilityWithContainerStatus(&deployment.Status, w.Reason, w.Message); shouldRequeue {
// If we don't keep retrying here, at some point the revision gets stuck without updates and never reflects the latest status.
return controller.NewRequeueAfter(requeuePeriodForProgressionChecking)
}
}
}
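A minimal sketch of the requeue mechanism the hunk above relies on, assuming the usual knative.dev/pkg behavior: returning the error produced by `controller.NewRequeueAfter` asks the generated reconciler to re-enqueue the key after the given delay rather than recording a reconcile failure. The `Reconciler` type and `stillProgressing` flag here are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"knative.dev/pkg/controller"
)

const requeuePeriod = 1 * time.Second

type Reconciler struct{}

// reconcile returns the special requeue error while work is still in
// progress, so the framework retries this key after requeuePeriod
// instead of treating the reconcile as failed.
func (r *Reconciler) reconcile(ctx context.Context, stillProgressing bool) error {
	if stillProgressing {
		return controller.NewRequeueAfter(requeuePeriod)
	}
	return nil
}

func main() {
	r := &Reconciler{}
	if err := r.reconcile(context.Background(), true); err != nil {
		// In a real controller the framework inspects this error and re-enqueues;
		// here we just print it.
		fmt.Println("requeue requested:", err)
	}
}
```

This also explains the `WantErr: true, // requeue error` expectation in the table test below: the requeue request surfaces as an error from the reconcile call.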
11 changes: 4 additions & 7 deletions pkg/reconciler/revision/table_test.go
@@ -566,7 +566,7 @@ func TestReconcile(t *testing.T) {
MarkContainerHealthy(),
WithRevisionObservedGeneration(1),
),
pa("foo", "pull-backoff", WithBufferedTrafficAllConditionsSet), // pa can't be ready since deployment times out.
pa("foo", "pull-backoff", WithReachabilityUnreachable), // pa can't be ready since deployment times out.
pod(t, "foo", "pull-backoff", WithWaitingContainer("pull-backoff", "ImagePullBackoff", "can't pull it")),
unAvailableDeploy(deploy(t, "foo", "pull-backoff")),
image("foo", "pull-backoff"),
@@ -575,13 +575,10 @@
Object: Revision("foo", "pull-backoff",
WithLogURL,
MarkContainerHealthy(),
MarkActiveUknown("Queued", "Requests to the target are being buffered as resources are provisioned."),
MarkResourcesUnavailable("MinimumReplicasUnavailable", "Deployment does not have minimum availability."), withDefaultContainerStatuses(), WithRevisionObservedGeneration(1)),
}},
WantUpdates: []clientgotesting.UpdateActionImpl{{
Object: pa("foo", "pull-backoff", WithBufferedTrafficAllConditionsSet, WithReachabilityUnreachable),
MarkResourcesUnavailable("ImagePullBackoff", "can't pull it"), withDefaultContainerStatuses(), WithRevisionObservedGeneration(1)),
}},
Key: "foo/pull-backoff",
WantErr: true, // requeue error
Key: "foo/pull-backoff",
}, {
Name: "surface pod errors",
// Test the propagation of the termination state of a Pod into the revision.
12 changes: 0 additions & 12 deletions pkg/testing/functional.go
@@ -124,18 +124,6 @@ func WithBufferedTraffic(pa *autoscalingv1alpha1.PodAutoscaler) {
"Requests to the target are being buffered as resources are provisioned.")
}

// WithBufferedTrafficAllConditionsSet updates the PA to reflect that it has received
// and buffered traffic, it is not ready and waits for sks too.
func WithBufferedTrafficAllConditionsSet(pa *autoscalingv1alpha1.PodAutoscaler) {
pa.Status.MarkActivating("Queued",
"Requests to the target are being buffered as resources are provisioned.")
pa.Status.MarkSKSNotReady("K8s Service is not ready")
pa.Status.MarkScaleTargetInitialized()
pa.Status.MarkNotReady("Queued",
"Requests to the target are being buffered as resources are provisioned.")

}

// WithNoTraffic updates the PA to reflect the fact that it is not
// receiving traffic.
func WithNoTraffic(reason, message string) PodAutoscalerOption {
6 changes: 0 additions & 6 deletions pkg/testing/v1/revision.go
@@ -137,12 +137,6 @@ func MarkInactive(reason, message string) RevisionOption {
}
}

func MarkActiveUknown(reason, message string) RevisionOption {
return func(r *v1.Revision) {
r.Status.MarkActiveUnknown(reason, message)
}
}

// MarkActivating calls .Status.MarkActivating on the Revision.
func MarkActivating(reason, message string) RevisionOption {
return func(r *v1.Revision) {
7 changes: 3 additions & 4 deletions test/conformance/api/v1/errorcondition_test.go
@@ -160,9 +160,9 @@ func TestContainerExitingMsg(t *testing.T) {
}

t.Log("When the containers keep crashing, the Revision should have error status.")
err = v1test.CheckRevisionState(clients.ServingClient, names.Revision, func(r *v1.Revision) (bool, error) {
if err := v1test.WaitForRevisionState(clients.ServingClient, names.Revision, func(r *v1.Revision) (bool, error) {
cond := r.Status.GetCondition(v1.RevisionConditionReady)
t.Logf("Revsion %s Ready status = %v", names.Revision, cond)
t.Logf("Revision %s Ready status = %v", names.Revision, cond)
if cond != nil {
if cond.Reason != "" && cond.Message != "" {
return true, nil
@@ -171,8 +171,7 @@
names.Revision, cond.Reason, cond.Message)
}
return false, nil
})
if err != nil {
}, "RevisionContainersCrashing"); err != nil {
t.Fatal("Failed to validate revision state:", err)
}
}
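The switch from `CheckRevisionState` to `WaitForRevisionState` trades a one-shot assertion for polling until the Revision reports the expected condition. A generic sketch of that polling pattern using k8s.io/apimachinery's wait helpers (not the actual `v1test` implementation):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForState polls check until it reports done (or returns an error),
// giving a crashing container time to surface its reason/message on the
// Revision instead of asserting the state exactly once.
func waitForState(check func() (bool, error)) error {
	return wait.PollImmediate(1*time.Second, 2*time.Minute, check)
}

func main() {
	attempts := 0
	err := waitForState(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // state not reached yet; keep polling
		}
		return true, nil
	})
	if err != nil {
		fmt.Println("state never reached:", err)
		return
	}
	fmt.Println("state reached after", attempts, "attempts")
}
```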
