perf: Cache taints for existing nodes #1827

Merged
6 changes: 4 additions & 2 deletions pkg/controllers/provisioning/scheduling/existingnode.go
@@ -31,14 +31,15 @@ import (
type ExistingNode struct {
*state.StateNode
cachedAvailable v1.ResourceList // Cache so we don't have to re-subtract resources on the StateNode every time
+cachedTaints []v1.Taint // Cache so we don't have to re-construct the taints each time we attempt to schedule a pod

Pods []*v1.Pod
topology *Topology
requests v1.ResourceList
requirements scheduling.Requirements
}

-func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode {
+func NewExistingNode(n *state.StateNode, topology *Topology, taints []v1.Taint, daemonResources v1.ResourceList) *ExistingNode {
// The state node passed in here must be a deep copy from cluster state as we modify it
// the remaining daemonResources to schedule are the total daemonResources minus what has already scheduled
remainingDaemonResources := resources.Subtract(daemonResources, n.DaemonSetRequests())
@@ -54,6 +55,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.
node := &ExistingNode{
StateNode: n,
cachedAvailable: n.Available(),
+cachedTaints: taints,
topology: topology,
requests: remainingDaemonResources,
requirements: scheduling.NewLabelRequirements(n.Labels()),
@@ -65,7 +67,7 @@ func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.

func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod, podRequests v1.ResourceList) error {
// Check Taints
-if err := scheduling.Taints(n.Taints()).Tolerates(pod); err != nil {
+if err := scheduling.Taints(n.cachedTaints).Tolerates(pod); err != nil {
return err
}
// determine the volumes that will be mounted if the pod schedules
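The change above is a straightforward memoization: the taint list is built once when the ExistingNode wrapper is constructed and the cached slice is reused on every subsequent Add call, rather than being reconstructed for each pod tried against the node. Below is a minimal sketch of that pattern, using hypothetical simplified types rather than Karpenter's real state.StateNode and scheduling helpers; the toleration loop is a hand-rolled stand-in for scheduling.Taints(...).Tolerates(pod).

```go
// Sketch only: ExistingNodeSketch and its methods are hypothetical,
// simplified stand-ins for the real existingnode.go types.
package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

type ExistingNodeSketch struct {
	cachedTaints []v1.Taint // built once at construction, reused for every pod
}

func NewExistingNodeSketch(taints []v1.Taint) *ExistingNodeSketch {
	return &ExistingNodeSketch{cachedTaints: taints}
}

// Add rejects the pod if it fails to tolerate any cached taint. The loop is a
// simplified substitute for scheduling.Taints(...).Tolerates(pod); the point
// is that n.cachedTaints is read directly instead of rebuilding the slice on
// every scheduling attempt.
func (n *ExistingNodeSketch) Add(pod *v1.Pod) error {
	for i := range n.cachedTaints {
		taint := &n.cachedTaints[i]
		tolerated := false
		for j := range pod.Spec.Tolerations {
			if pod.Spec.Tolerations[j].ToleratesTaint(taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return fmt.Errorf("pod %s/%s does not tolerate taint %s", pod.Namespace, pod.Name, taint.Key)
		}
	}
	return nil
}
```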
5 changes: 3 additions & 2 deletions pkg/controllers/provisioning/scheduling/scheduler.go
@@ -318,17 +318,18 @@ func (s *Scheduler) calculateExistingNodeClaims(stateNodes []*state.StateNode, d
// create our existing nodes
for _, node := range stateNodes {
// Calculate any daemonsets that should schedule to the inflight node
+taints := node.Taints()
var daemons []*corev1.Pod
for _, p := range daemonSetPods {
-if err := scheduling.Taints(node.Taints()).Tolerates(p); err != nil {
+if err := scheduling.Taints(taints).Tolerates(p); err != nil {
continue
}
if err := scheduling.NewLabelRequirements(node.Labels()).Compatible(scheduling.NewPodRequirements(p)); err != nil {
continue
}
daemons = append(daemons, p)
}
-s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, resources.RequestsForPods(daemons...)))
+s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, taints, resources.RequestsForPods(daemons...)))

// We don't use the status field and instead recompute the remaining resources to ensure we have a consistent view
// of the cluster during scheduling. Depending on how node creation falls out, this will also work for cases where
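On the scheduler side, node.Taints() is now computed once per state node and the resulting slice is shared by the daemonset toleration checks and by NewExistingNode, instead of being recomputed inside the inner loop for every daemonset pod. A rough sketch of that hoisting, with a hypothetical stateNode interface and an injected tolerates callback standing in for the real scheduling package:

```go
// Sketch only: stateNode and tolerates are hypothetical simplifications of
// state.StateNode and scheduling.Taints(...).Tolerates in scheduler.go.
package sketch

import v1 "k8s.io/api/core/v1"

type stateNode interface {
	Taints() []v1.Taint
}

// schedulableDaemons filters the daemonset pods a node can run, reusing one
// taint computation per node instead of one per (node, pod) pair.
func schedulableDaemons(node stateNode, daemonSetPods []*v1.Pod, tolerates func([]v1.Taint, *v1.Pod) error) ([]v1.Taint, []*v1.Pod) {
	taints := node.Taints() // hoisted out of the pod loop
	var daemons []*v1.Pod
	for _, p := range daemonSetPods {
		if err := tolerates(taints, p); err != nil {
			continue // pod does not tolerate the node's taints
		}
		daemons = append(daemons, p)
	}
	// The caller passes the same taints slice on to NewExistingNode, so the
	// per-pod Add checks reuse it as well.
	return taints, daemons
}
```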