Skip to content

Commit

Permalink
chore: Adding cherry picked commits to release-v0.33.x (#979)
Browse files Browse the repository at this point in the history
Co-authored-by: Todd Neal <tnealt@amazon.com>
  • Loading branch information
jigisha620 and tzneal authored Jan 30, 2024
1 parent 16a5b02 commit fd28943
Show file tree
Hide file tree
Showing 4 changed files with 43 additions and 13 deletions.
2 changes: 1 addition & 1 deletion hack/toolchain.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ tools() {
go install github.com/mikefarah/yq/v4@latest
go install github.com/norwoodj/helm-docs/cmd/helm-docs@latest
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
go install sigs.k8s.io/controller-tools/cmd/controller-gen@latest
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0
go install github.com/sigstore/cosign/cmd/cosign@latest
go install -tags extended github.com/gohugoio/hugo@v0.110.0
go install golang.org/x/vuln/cmd/govulncheck@latest
Expand Down
3 changes: 2 additions & 1 deletion pkg/controllers/provisioning/scheduling/topology.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"sigs.k8s.io/karpenter/pkg/controllers/state"
"sigs.k8s.io/karpenter/pkg/scheduling"
"sigs.k8s.io/karpenter/pkg/utils/functional"
"sigs.k8s.io/karpenter/pkg/utils/pretty"

"k8s.io/apimachinery/pkg/api/errors"

Expand Down Expand Up @@ -162,7 +163,7 @@ func (t *Topology) AddRequirements(podRequirements, nodeRequirements scheduling.
}
domains := topology.Get(p, podDomains, nodeDomains)
if domains.Len() == 0 {
return nil, fmt.Errorf("unsatisfiable topology constraint for %s, key=%s (counts = %v, podDomains = %v, nodeDomains = %v)", topology.Type, topology.Key, topology.domains, podDomains, nodeDomains)
return nil, fmt.Errorf("unsatisfiable topology constraint for %s, key=%s (counts = %s, podDomains = %v, nodeDomains = %v", topology.Type, topology.Key, pretty.Map(topology.domains, 5), podDomains, nodeDomains)
}
requirements.Add(domains)
}
Expand Down
34 changes: 23 additions & 11 deletions pkg/controllers/provisioning/scheduling/topologygroup.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,9 @@ type TopologyGroup struct {
selector *metav1.LabelSelector
nodeFilter TopologyNodeFilter
// Index
owners map[types.UID]struct{} // Pods that have this topology as a scheduling rule
domains map[string]int32 // TODO(ellistarn) explore replacing with a minheap
owners map[types.UID]struct{} // Pods that have this topology as a scheduling rule
domains map[string]int32 // TODO(ellistarn) explore replacing with a minheap
emptyDomains sets.Set[string] // domains for which we know that no pod exists
}

func NewTopologyGroup(topologyType TopologyType, topologyKey string, pod *v1.Pod, namespaces sets.Set[string], labelSelector *metav1.LabelSelector, maxSkew int32, minDomains *int32, domains sets.Set[string]) *TopologyGroup {
Expand All @@ -76,15 +77,16 @@ func NewTopologyGroup(topologyType TopologyType, topologyKey string, pod *v1.Pod
nodeSelector = MakeTopologyNodeFilter(pod)
}
return &TopologyGroup{
Type: topologyType,
Key: topologyKey,
namespaces: namespaces,
selector: labelSelector,
nodeFilter: nodeSelector,
maxSkew: maxSkew,
domains: domainCounts,
owners: map[types.UID]struct{}{},
minDomains: minDomains,
Type: topologyType,
Key: topologyKey,
namespaces: namespaces,
selector: labelSelector,
nodeFilter: nodeSelector,
maxSkew: maxSkew,
domains: domainCounts,
emptyDomains: domains.Clone(),
owners: map[types.UID]struct{}{},
minDomains: minDomains,
}
}

Expand All @@ -104,6 +106,7 @@ func (t *TopologyGroup) Get(pod *v1.Pod, podDomains, nodeDomains *scheduling.Req
// Record increments the tracked pod count for each of the given domains and
// clears those domains from the empty-domain set, since a pod now exists there.
func (t *TopologyGroup) Record(domains ...string) {
	for i := range domains {
		t.domains[domains[i]]++
		t.emptyDomains.Delete(domains[i])
	}
}

Expand All @@ -118,6 +121,7 @@ func (t *TopologyGroup) Register(domains ...string) {
for _, domain := range domains {
if _, ok := t.domains[domain]; !ok {
t.domains[domain] = 0
t.emptyDomains.Insert(domain)
}
}
}
Expand Down Expand Up @@ -245,6 +249,14 @@ func (t *TopologyGroup) nextDomainAffinity(pod *v1.Pod, podDomains *scheduling.R

func (t *TopologyGroup) nextDomainAntiAffinity(domains *scheduling.Requirement) *scheduling.Requirement {
options := scheduling.NewRequirement(domains.Key, v1.NodeSelectorOpDoesNotExist)
// pods with anti-affinity must schedule to a domain where there are currently none of those pods (an empty
// domain). If there are none of those domains, then the pod can't schedule and we don't need to walk this
// list of domains. The use case where this optimization is really great is when we are launching nodes for
// a deployment of pods with self anti-affinity. The domains map here continues to grow, and we continue to
// fully scan it each iteration.
if len(t.emptyDomains) == 0 {
return options
}
for domain := range t.domains {
if domains.Has(domain) && t.domains[domain] == 0 {
options.Insert(domain)
Expand Down
17 changes: 17 additions & 0 deletions pkg/utils/pretty/pretty.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ limitations under the License.
package pretty

import (
"bytes"
"encoding/json"
"fmt"
"strings"
Expand Down Expand Up @@ -43,3 +44,19 @@ func Slice[T any](s []T, maxItems int) string {
}
return sb.String()
}

// Map formats a map as space-separated "k: v " pairs, truncating after
// maxItems entries so that the description in a log doesn't get too long.
// When entries are omitted, the output ends with "and N other(s)" where N is
// the number of entries not shown. Map iteration order is random, so the
// particular entries shown may differ between calls for the same map.
func Map[K comparable, V any](values map[K]V, maxItems int) string {
	var buf bytes.Buffer
	shown := 0
	for k, v := range values {
		// Truncate by entry count, not by buffer length: comparing
		// buf.Len() against maxItems would cut off after maxItems *bytes*
		// and report a byte delta in the "other(s)" suffix.
		if shown >= maxItems {
			break
		}
		fmt.Fprintf(&buf, "%v: %v ", k, v)
		shown++
	}
	if omitted := len(values) - shown; omitted > 0 {
		fmt.Fprintf(&buf, "and %d other(s)", omitted)
	}
	return buf.String()
}

0 comments on commit fd28943

Please sign in to comment.