repo_name (string, 1-52 chars) | repo_creator (6 distinct values) | programming_language (4 distinct values) | code (string, 0-9.68M chars) | num_lines (int64, 1-234k) |
---|---|---|---|---|
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"context"
"errors"
"fmt"
"sort"
"time"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/samber/lo"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/utils/node"
)
// Expiration is a subreconciler that deletes expired nodes.
// Expiration will respect TTLSecondsUntilExpired
type Expiration struct {
clock clock.Clock
kubeClient client.Client
cluster *state.Cluster
provisioner *provisioning.Provisioner
recorder events.Recorder
}
func NewExpiration(clk clock.Clock, kubeClient client.Client, cluster *state.Cluster, provisioner *provisioning.Provisioner, recorder events.Recorder) *Expiration {
return &Expiration{
clock: clk,
kubeClient: kubeClient,
cluster: cluster,
provisioner: provisioner,
recorder: recorder,
}
}
// ShouldDeprovision is a predicate used to filter deprovisionable nodes
func (e *Expiration) ShouldDeprovision(ctx context.Context, c *Candidate) bool {
// Filter out candidates whose provisioner doesn't define TTLSecondsUntilExpired or that aren't marked as expired.
if c.provisioner == nil || c.provisioner.Spec.TTLSecondsUntilExpired == nil {
return false
}
return c.Node.Annotations[v1alpha5.VoluntaryDisruptionAnnotationKey] == v1alpha5.VoluntaryDisruptionExpiredAnnotationValue
}
// filterAndSortCandidates filters out non-terminable candidates and orders the remainder by expiration time, earliest expiration first
func (e *Expiration) filterAndSortCandidates(ctx context.Context, nodes []*Candidate) ([]*Candidate, error) {
candidates, err := filterCandidates(ctx, e.kubeClient, e.recorder, nodes)
if err != nil {
return nil, fmt.Errorf("filtering candidates, %w", err)
}
sort.Slice(candidates, func(i int, j int) bool {
return node.GetExpirationTime(candidates[i].Node, candidates[i].provisioner).Before(node.GetExpirationTime(candidates[j].Node, candidates[j].provisioner))
})
return candidates, nil
}
// ComputeCommand generates a deprovisioning command given deprovisionable nodes
func (e *Expiration) ComputeCommand(ctx context.Context, nodes ...*Candidate) (Command, error) {
candidates, err := e.filterAndSortCandidates(ctx, nodes)
if err != nil {
return Command{}, fmt.Errorf("filtering candidates, %w", err)
}
deprovisioningEligibleMachinesGauge.WithLabelValues(e.String()).Set(float64(len(candidates)))
// Deprovision all empty expired nodes, as they require no scheduling simulations.
if empty := lo.Filter(candidates, func(c *Candidate, _ int) bool {
return len(c.pods) == 0
}); len(empty) > 0 {
return Command{
candidates: empty,
}, nil
}
for _, candidate := range candidates {
// Check if we need to create any nodes.
results, err := simulateScheduling(ctx, e.kubeClient, e.cluster, e.provisioner, candidate)
if err != nil {
// if a candidate node is now deleting, just retry
if errors.Is(err, errCandidateDeleting) {
continue
}
return Command{}, err
}
// Log when not all pods could be rescheduled in the simulation, since the expiration command is executed regardless.
if !results.AllPodsScheduled() {
logging.FromContext(ctx).With("node", candidate.Name).Debugf("continuing to expire node after scheduling simulation failed to schedule all pods, %s", results.PodSchedulingErrors())
}
logging.FromContext(ctx).With("ttl", time.Duration(ptr.Int64Value(candidate.provisioner.Spec.TTLSecondsUntilExpired))*time.Second).
With("delay", time.Since(node.GetExpirationTime(candidate.Node, candidate.provisioner))).Infof("triggering termination for expired node after TTL")
return Command{
candidates: []*Candidate{candidate},
replacements: results.NewMachines,
}, nil
}
return Command{}, nil
}
// String is the string representation of the deprovisioner
func (e *Expiration) String() string {
return metrics.ExpirationReason
}
| 128 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning_test
import (
"sync"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var _ = Describe("Expiration", func() {
var prov *v1alpha5.Provisioner
var machine *v1alpha5.Machine
var node *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{
TTLSecondsUntilExpired: ptr.Int64(30),
})
machine, node = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("should ignore nodes with the disruption annotation but different value", func() {
node.Annotations = lo.Assign(node.Annotations, map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: "wrong-value",
})
ExpectApplied(ctx, env.Client, machine, node, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
// Expect to not create or delete more machines
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, node)
})
It("should ignore nodes without the disruption annotation", func() {
delete(node.Annotations, v1alpha5.VoluntaryDisruptionAnnotationKey)
ExpectApplied(ctx, env.Client, machine, node, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
// Expect to not create or delete more machines
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, machine)
})
It("can delete expired nodes", func() {
ExpectApplied(ctx, env.Client, machine, node, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine)
// Expect that the expired machine is gone
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(0))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(0))
ExpectNotFound(ctx, env.Client, machine, node)
})
It("should deprovision all empty expired nodes in parallel", func() {
machines, nodes := test.MachinesAndNodes(100, v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
for _, m := range machines {
ExpectApplied(ctx, env.Client, m)
}
for _, n := range nodes {
ExpectApplied(ctx, env.Client, n)
}
ExpectApplied(ctx, env.Client, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, nodes, machines)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machines...)
// Expect that the expired machines are gone
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(0))
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(0))
})
It("should expire one non-empty node at a time, starting with most expired", func() {
expireProv := test.Provisioner(test.ProvisionerOptions{
TTLSecondsUntilExpired: ptr.Int64(100),
})
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(2, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
machineToExpire, nodeToExpire := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: expireProv.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
machineNotExpire, nodeNotExpire := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], machineToExpire, nodeToExpire, machineNotExpire, nodeNotExpire, expireProv, prov)
// bind pods to node so that they're not empty and don't deprovision in parallel.
ExpectManualBinding(ctx, env.Client, pods[0], nodeToExpire)
ExpectManualBinding(ctx, env.Client, pods[1], nodeNotExpire)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{nodeToExpire, nodeNotExpire}, []*v1alpha5.Machine{machineToExpire, machineNotExpire})
// deprovisioning won't delete the old node until the new node is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machineToExpire)
// Expect that one of the expired machines is gone
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(2))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(2))
ExpectNotFound(ctx, env.Client, machineToExpire, nodeToExpire)
})
It("can replace node for expiration", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
prov.Spec.TTLSecondsUntilExpired = ptr.Int64(30)
ExpectApplied(ctx, env.Client, rs, pod, machine, node, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
// deprovisioning won't delete the old node until the new node is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine)
// Expect that the new machine was created, and it's different than the original
ExpectNotFound(ctx, env.Client, machine, node)
machines := ExpectMachines(ctx, env.Client)
nodes := ExpectNodes(ctx, env.Client)
Expect(machines).To(HaveLen(1))
Expect(nodes).To(HaveLen(1))
Expect(machines[0].Name).ToNot(Equal(machine.Name))
Expect(nodes[0].Name).ToNot(Equal(node.Name))
})
It("should uncordon nodes when expiration replacement fails", func() {
cloudProvider.AllowedCreateCalls = 0 // fail the replacement and expect it to uncordon
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
ExpectApplied(ctx, env.Client, rs, machine, node, prov, pod)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectNewMachinesDeleted(ctx, env.Client, &wg, 1)
_, err := deprovisioningController.Reconcile(ctx, reconcile.Request{})
Expect(err).To(HaveOccurred())
wg.Wait()
// We should have tried to create a new machine but failed to do so; therefore, we uncordoned the existing node
node = ExpectExists(ctx, env.Client, node)
Expect(node.Spec.Unschedulable).To(BeFalse())
})
It("can replace node for expiration with multiple nodes", func() {
currentInstance := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "current-on-demand",
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 0.5,
Available: false,
},
},
})
replacementInstance := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "replacement-on-demand",
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 0.3,
Available: true,
},
},
Resources: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("3")},
})
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
currentInstance,
replacementInstance,
}
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}},
// Make each pod request about a third of the allocatable on the node
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("2")},
},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: currentInstance.Name,
v1alpha5.LabelCapacityType: currentInstance.Offerings[0].CapacityType,
v1.LabelTopologyZone: currentInstance.Offerings[0].Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("8")},
},
})
ExpectApplied(ctx, env.Client, rs, machine, node, prov, pods[0], pods[1], pods[2])
// bind pods to node
ExpectManualBinding(ctx, env.Client, pods[0], node)
ExpectManualBinding(ctx, env.Client, pods[1], node)
ExpectManualBinding(ctx, env.Client, pods[2], node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
// deprovisioning won't delete the old machine until the new machine is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 3)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine)
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(3))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(3))
ExpectNotFound(ctx, env.Client, machine, node)
})
})
| 433 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"context"
"fmt"
"math"
"strconv"
"github.com/samber/lo"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
deprovisioningevents "github.com/aws/karpenter-core/pkg/controllers/deprovisioning/events"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
pscheduling "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/utils/pod"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func filterCandidates(ctx context.Context, kubeClient client.Client, recorder events.Recorder, nodes []*Candidate) ([]*Candidate, error) {
pdbs, err := NewPDBLimits(ctx, kubeClient)
if err != nil {
return nil, fmt.Errorf("tracking PodDisruptionBudgets, %w", err)
}
// filter out nodes that can't be terminated
nodes = lo.Filter(nodes, func(cn *Candidate, _ int) bool {
if !cn.Node.DeletionTimestamp.IsZero() {
recorder.Publish(deprovisioningevents.Blocked(cn.Node, cn.Machine, "in the process of deletion")...)
return false
}
if pdb, ok := pdbs.CanEvictPods(cn.pods); !ok {
recorder.Publish(deprovisioningevents.Blocked(cn.Node, cn.Machine, fmt.Sprintf("pdb %s prevents pod evictions", pdb))...)
return false
}
if p, ok := hasDoNotEvictPod(cn); ok {
recorder.Publish(deprovisioningevents.Blocked(cn.Node, cn.Machine, fmt.Sprintf("pod %s/%s has do not evict annotation", p.Namespace, p.Name))...)
return false
}
return true
})
return nodes, nil
}
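// simulateScheduling runs the provisioning scheduler in simulation mode to check whether the pods on
// the candidate nodes (together with pending pods and pods on already-deleting nodes) could still be
// scheduled if the candidates were removed, and which replacement machines would be needed.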
//nolint:gocyclo
func simulateScheduling(ctx context.Context, kubeClient client.Client, cluster *state.Cluster, provisioner *provisioning.Provisioner,
candidates ...*Candidate) (*pscheduling.Results, error) {
candidateNames := sets.NewString(lo.Map(candidates, func(t *Candidate, i int) string { return t.Name() })...)
nodes := cluster.Nodes()
deletingNodes := nodes.Deleting()
stateNodes := lo.Filter(nodes.Active(), func(n *state.StateNode, _ int) bool {
return !candidateNames.Has(n.Name())
})
// We do one final check to ensure that the node that we are attempting to consolidate isn't
// already handled for deletion by some other controller. This could happen if the node was markedForDeletion
// between returning the candidates and getting the stateNodes above
if _, ok := lo.Find(deletingNodes, func(n *state.StateNode) bool {
return candidateNames.Has(n.Name())
}); ok {
return nil, errCandidateDeleting
}
// We get the pods that are on nodes that are deleting
deletingNodePods, err := deletingNodes.Pods(ctx, kubeClient)
if err != nil {
return nil, fmt.Errorf("failed to get pods from deleting nodes, %w", err)
}
// start by getting all pending pods
pods, err := provisioner.GetPendingPods(ctx)
if err != nil {
return nil, fmt.Errorf("determining pending pods, %w", err)
}
for _, n := range candidates {
pods = append(pods, n.pods...)
}
pods = append(pods, deletingNodePods...)
scheduler, err := provisioner.NewScheduler(ctx, pods, stateNodes, pscheduling.SchedulerOptions{
SimulationMode: true,
})
if err != nil {
return nil, fmt.Errorf("creating scheduler, %w", err)
}
results, err := scheduler.Solve(ctx, pods)
if err != nil {
return nil, fmt.Errorf("simulating scheduling, %w", err)
}
// Check whether the simulation relied on an existing node that isn't initialized yet. If so, mark
// those pods as failing to schedule: we assume that a deleted candidate's pods can immediately move
// to an existing node, which won't happen if that node isn't ready.
for _, n := range results.ExistingNodes {
if !n.Initialized() {
for _, p := range n.Pods {
results.PodErrors[p] = fmt.Errorf("would schedule against a non-initialized node %s", n.Name())
}
}
}
return results, nil
}
// instanceTypesAreSubset returns true if the lhs slice of instance types is a subset of the rhs.
func instanceTypesAreSubset(lhs []*cloudprovider.InstanceType, rhs []*cloudprovider.InstanceType) bool {
rhsNames := sets.NewString(lo.Map(rhs, func(t *cloudprovider.InstanceType, i int) string { return t.Name })...)
lhsNames := sets.NewString(lo.Map(lhs, func(t *cloudprovider.InstanceType, i int) string { return t.Name })...)
return len(rhsNames.Intersection(lhsNames)) == len(lhsNames)
}
// GetPodEvictionCost returns the disruption cost computed for evicting the given pod.
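// As an illustrative example (numbers chosen here, not from the source): a pod annotated with a
// deletion cost of 2^27 (134217728) contributes +1.0, and a priority of 2^25 (33554432) contributes
// another +1.0, giving a total cost of 3.0 before clamping to [-10.0, 10.0].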
func GetPodEvictionCost(ctx context.Context, p *v1.Pod) float64 {
cost := 1.0
podDeletionCostStr, ok := p.Annotations[v1.PodDeletionCost]
if ok {
podDeletionCost, err := strconv.ParseFloat(podDeletionCostStr, 64)
if err != nil {
logging.FromContext(ctx).Errorf("parsing %s=%s from pod %s, %s",
v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p), err)
} else {
// the pod deletion disruptionCost is in [-2147483647, 2147483647];
// the min pod disruptionCost makes one pod count as ~ -15 pods, and the max makes one pod count as ~ 17 pods.
cost += podDeletionCost / math.Pow(2, 27.0)
}
}
// the scheduling priority is in [-2147483648, 1000000000]
if p.Spec.Priority != nil {
cost += float64(*p.Spec.Priority) / math.Pow(2, 25)
}
// overall we clamp the pod cost to the range [-10.0, 10.0] with the default being 1.0
return clamp(-10.0, cost, 10.0)
}
func filterByPrice(options []*cloudprovider.InstanceType, reqs scheduling.Requirements, price float64) []*cloudprovider.InstanceType {
var result []*cloudprovider.InstanceType
for _, it := range options {
launchPrice := worstLaunchPrice(it.Offerings.Available(), reqs)
if launchPrice < price {
result = append(result, it)
}
}
return result
}
func disruptionCost(ctx context.Context, pods []*v1.Pod) float64 {
cost := 0.0
for _, p := range pods {
cost += GetPodEvictionCost(ctx, p)
}
return cost
}
// GetCandidates returns candidates that currently appear deprovisionable based on their provisioner
func GetCandidates(ctx context.Context, cluster *state.Cluster, kubeClient client.Client, recorder events.Recorder, clk clock.Clock, cloudProvider cloudprovider.CloudProvider, shouldDeprovision CandidateFilter) ([]*Candidate, error) {
provisionerMap, provisionerToInstanceTypes, err := buildProvisionerMap(ctx, kubeClient, cloudProvider)
if err != nil {
return nil, err
}
candidates := lo.FilterMap(cluster.Nodes(), func(n *state.StateNode, _ int) (*Candidate, bool) {
cn, e := NewCandidate(ctx, kubeClient, recorder, clk, n, provisionerMap, provisionerToInstanceTypes)
return cn, e == nil
})
// Filter only the valid candidates that we should deprovision
return lo.Filter(candidates, func(c *Candidate, _ int) bool { return shouldDeprovision(ctx, c) }), nil
}
// buildProvisionerMap builds a provName -> provisioner map and a provName -> instanceName -> instance type map
func buildProvisionerMap(ctx context.Context, kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) (map[string]*v1alpha5.Provisioner, map[string]map[string]*cloudprovider.InstanceType, error) {
provisioners := map[string]*v1alpha5.Provisioner{}
var provList v1alpha5.ProvisionerList
if err := kubeClient.List(ctx, &provList); err != nil {
return nil, nil, fmt.Errorf("listing provisioners, %w", err)
}
instanceTypesByProvisioner := map[string]map[string]*cloudprovider.InstanceType{}
for i := range provList.Items {
p := &provList.Items[i]
provisioners[p.Name] = p
provInstanceTypes, err := cloudProvider.GetInstanceTypes(ctx, p)
if err != nil {
return nil, nil, fmt.Errorf("listing instance types for %s, %w", p.Name, err)
}
instanceTypesByProvisioner[p.Name] = map[string]*cloudprovider.InstanceType{}
for _, it := range provInstanceTypes {
instanceTypesByProvisioner[p.Name][it.Name] = it
}
}
return provisioners, instanceTypesByProvisioner, nil
}
// mapCandidates maps the list of proposed candidates onto the current state, keeping only the candidates that are still present
func mapCandidates(proposed, current []*Candidate) []*Candidate {
proposedNames := sets.NewString(lo.Map(proposed, func(c *Candidate, i int) string { return c.Name() })...)
return lo.Filter(current, func(c *Candidate, _ int) bool {
return proposedNames.Has(c.Name())
})
}
// worstLaunchPrice gets the worst-case launch price from the offerings that are offered
// on an instance type. If the instance type has a spot offering available, then it uses the spot offering
// to get the launch price; else, it uses the on-demand launch price
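// For example (illustrative prices): if the requirements allow spot in zones where spot offerings
// cost 0.10 and 0.15, the worst launch price is 0.15; if only on-demand offerings at 0.20 and 0.25
// match, it is 0.25; and if no offering matches, math.MaxFloat64 is returned.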
func worstLaunchPrice(ofs []cloudprovider.Offering, reqs scheduling.Requirements) float64 {
// We prefer to launch spot offerings, so we will get the worst price based on the node requirements
if reqs.Get(v1alpha5.LabelCapacityType).Has(v1alpha5.CapacityTypeSpot) {
spotOfferings := lo.Filter(ofs, func(of cloudprovider.Offering, _ int) bool {
return of.CapacityType == v1alpha5.CapacityTypeSpot && reqs.Get(v1.LabelTopologyZone).Has(of.Zone)
})
if len(spotOfferings) > 0 {
return lo.MaxBy(spotOfferings, func(of1, of2 cloudprovider.Offering) bool {
return of1.Price > of2.Price
}).Price
}
}
if reqs.Get(v1alpha5.LabelCapacityType).Has(v1alpha5.CapacityTypeOnDemand) {
onDemandOfferings := lo.Filter(ofs, func(of cloudprovider.Offering, _ int) bool {
return of.CapacityType == v1alpha5.CapacityTypeOnDemand && reqs.Get(v1.LabelTopologyZone).Has(of.Zone)
})
if len(onDemandOfferings) > 0 {
return lo.MaxBy(onDemandOfferings, func(of1, of2 cloudprovider.Offering) bool {
return of1.Price > of2.Price
}).Price
}
}
return math.MaxFloat64
}
func clamp(min, val, max float64) float64 {
if val < min {
return min
}
if val > max {
return max
}
return val
}
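// hasDoNotEvictPod returns the first pod on the candidate that carries the do-not-evict annotation
// and is not terminating, terminal, or owned by the node, along with whether such a pod was found.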
func hasDoNotEvictPod(c *Candidate) (*v1.Pod, bool) {
return lo.Find(c.pods, func(p *v1.Pod) bool {
if pod.IsTerminating(p) || pod.IsTerminal(p) || pod.IsOwnedByNode(p) {
return false
}
return pod.HasDoNotEvict(p)
})
}
| 271 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"github.com/prometheus/client_golang/prometheus"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/aws/karpenter-core/pkg/metrics"
)
func init() {
crmetrics.Registry.MustRegister(deprovisioningDurationHistogram)
crmetrics.Registry.MustRegister(deprovisioningReplacementNodeInitializedHistogram)
crmetrics.Registry.MustRegister(deprovisioningActionsPerformedCounter)
crmetrics.Registry.MustRegister(deprovisioningEligibleMachinesGauge)
}
const (
deprovisioningSubsystem = "deprovisioning"
deprovisionerLabel = "deprovisioner"
actionLabel = "action"
)
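// The metrics below are registered with the controller-runtime registry and are exposed as
// <metrics.Namespace>_deprovisioning_<name>; for example, the eligible-machines gauge appears as
// <namespace>_deprovisioning_eligible_machines labeled by deprovisioner.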
var (
deprovisioningDurationHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: metrics.Namespace,
Subsystem: deprovisioningSubsystem,
Name: "evaluation_duration_seconds",
Help: "Duration of the deprovisioning evaluation process in seconds.",
Buckets: metrics.DurationBuckets(),
},
[]string{"method"})
deprovisioningReplacementNodeInitializedHistogram = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: metrics.Namespace,
Subsystem: deprovisioningSubsystem,
Name: "replacement_machine_initialized_seconds",
Help: "Amount of time required for a replacement machine to become initialized.",
Buckets: metrics.DurationBuckets(),
})
deprovisioningActionsPerformedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: metrics.Namespace,
Subsystem: deprovisioningSubsystem,
Name: "actions_performed",
Help: "Number of deprovisioning actions performed. Labeled by deprovisioner.",
},
[]string{actionLabel, deprovisionerLabel},
)
deprovisioningEligibleMachinesGauge = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: metrics.Namespace,
Subsystem: deprovisioningSubsystem,
Name: "eligible_machines",
Help: "Number of machines eligible for deprovisioning by Karpenter. Labeled by deprovisioner",
},
[]string{deprovisionerLabel},
)
)
| 74 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"context"
"fmt"
"math"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)
type MultiMachineConsolidation struct {
consolidation
}
func NewMultiMachineConsolidation(clk clock.Clock, cluster *state.Cluster, kubeClient client.Client,
provisioner *provisioning.Provisioner, cp cloudprovider.CloudProvider, recorder events.Recorder) *MultiMachineConsolidation {
return &MultiMachineConsolidation{makeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder)}
}
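// ComputeCommand generates a deprovisioning command by trying to consolidate as many of the sorted
// candidates at once as possible, then validating that the chosen command is still valid before returning it.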
func (m *MultiMachineConsolidation) ComputeCommand(ctx context.Context, candidates ...*Candidate) (Command, error) {
if m.cluster.Consolidated() {
return Command{}, nil
}
candidates, err := m.sortAndFilterCandidates(ctx, candidates)
if err != nil {
return Command{}, fmt.Errorf("sorting candidates, %w", err)
}
deprovisioningEligibleMachinesGauge.WithLabelValues(m.String()).Set(float64(len(candidates)))
// For now, we will consider up to every machine in the cluster, might be configurable in the future.
maxParallel := len(candidates)
cmd, err := m.firstNMachineConsolidationOption(ctx, candidates, maxParallel)
if err != nil {
return Command{}, err
}
if cmd.Action() == NoOpAction {
return cmd, nil
}
v := NewValidation(consolidationTTL, m.clock, m.cluster, m.kubeClient, m.provisioner, m.cloudProvider, m.recorder)
isValid, err := v.IsValid(ctx, cmd)
if err != nil {
return Command{}, fmt.Errorf("validating, %w", err)
}
if !isValid {
logging.FromContext(ctx).Debugf("consolidation command is no longer valid, %s", cmd)
return Command{}, nil
}
return cmd, nil
}
// firstNMachineConsolidationOption looks at the first N machines to determine if they can all be consolidated at once. The
// machines are sorted by increasing disruption order, which correlates to the likelihood of being able to consolidate the machine
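// For example (illustrative walk-through): with 8 candidates the search starts with min=1, max=7 and
// first tries consolidating candidates[0:5]; if that simulation succeeds it tries candidates[0:7],
// otherwise it narrows to candidates[0:3], and so on until the largest consolidatable prefix is found.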
func (m *MultiMachineConsolidation) firstNMachineConsolidationOption(ctx context.Context, candidates []*Candidate, max int) (Command, error) {
// we always operate on at least two machines at once; for a single machine, single-machine consolidation will find all solutions
if len(candidates) < 2 {
return Command{}, nil
}
min := 1
if len(candidates) <= max {
max = len(candidates) - 1
}
lastSavedCommand := Command{}
// binary search to find the maximum number of machines we can terminate
for min <= max {
mid := (min + max) / 2
candidatesToConsolidate := candidates[0 : mid+1]
cmd, err := m.computeConsolidation(ctx, candidatesToConsolidate...)
if err != nil {
return Command{}, err
}
// ensure that the action is sensible for replacements; see the explanation on filterOutSameType for why this is
// required
replacementHasValidInstanceTypes := false
if cmd.Action() == ReplaceAction {
cmd.replacements[0].InstanceTypeOptions = filterOutSameType(cmd.replacements[0], candidatesToConsolidate)
replacementHasValidInstanceTypes = len(cmd.replacements[0].InstanceTypeOptions) > 0
}
// replacementHasValidInstanceTypes will be false if the replacement action has no valid instance types remaining after filtering.
if replacementHasValidInstanceTypes || cmd.Action() == DeleteAction {
// we can consolidate machines [0,mid]
lastSavedCommand = cmd
min = mid + 1
} else {
max = mid - 1
}
}
return lastSavedCommand, nil
}
// filterOutSameType filters out instance types that are more expensive than the cheapest instance type that is being
// consolidated if the list of replacement instance types include one of the instance types that is being removed
//
// This handles the following potential consolidation result:
// machines=[t3a.2xlarge, t3a.2xlarge, t3a.small] -> 1 of t3a.small, t3a.xlarge, t3a.2xlarge
//
// In this case, we shouldn't perform this consolidation at all. This is equivalent to just
// deleting the two t3a.2xlarge machines. This code will identify that t3a.small is in both lists and filter
// out any instance type that is the same or more expensive than the t3a.small
//
// For another scenario:
// machines=[t3a.2xlarge, t3a.2xlarge, t3a.small] -> 1 of t3a.nano, t3a.small, t3a.xlarge, t3a.2xlarge
//
// This code sees that t3a.small is the cheapest type in both lists and filters it and anything more expensive out
// leaving the valid consolidation:
// machines=[t3a.2xlarge, t3a.2xlarge, t3a.small] -> 1 of t3a.nano
func filterOutSameType(newMachine *scheduling.Machine, consolidate []*Candidate) []*cloudprovider.InstanceType {
existingInstanceTypes := sets.NewString()
pricesByInstanceType := map[string]float64{}
// get, indexed by instance type, the cheapest offering price among the machines we are currently considering deleting
for _, c := range consolidate {
existingInstanceTypes.Insert(c.instanceType.Name)
of, ok := c.instanceType.Offerings.Get(c.capacityType, c.zone)
if !ok {
continue
}
existingPrice, ok := pricesByInstanceType[c.instanceType.Name]
if !ok {
existingPrice = math.MaxFloat64
}
if of.Price < existingPrice {
pricesByInstanceType[c.instanceType.Name] = of.Price
}
}
maxPrice := math.MaxFloat64
for _, it := range newMachine.InstanceTypeOptions {
// we are considering replacing multiple machines with a single machine of one of the same types, so the replacement
// machine must be cheaper than the price of the existing machine, or we should just keep that one and do a
// deletion only to reduce cluster disruption (fewer pods will re-schedule).
if existingInstanceTypes.Has(it.Name) {
if pricesByInstanceType[it.Name] < maxPrice {
maxPrice = pricesByInstanceType[it.Name]
}
}
}
return filterByPrice(newMachine.InstanceTypeOptions, newMachine.Requirements, maxPrice)
}
| 170 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"context"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// PDBLimits is used to evaluate if evicting a list of pods is possible.
type PDBLimits struct {
ctx context.Context
kubeClient client.Client
pdbs []*pdbItem
}
func NewPDBLimits(ctx context.Context, kubeClient client.Client) (*PDBLimits, error) {
ps := &PDBLimits{
ctx: ctx,
kubeClient: kubeClient,
}
var pdbList policyv1.PodDisruptionBudgetList
if err := kubeClient.List(ctx, &pdbList); err != nil {
return nil, err
}
for _, pdb := range pdbList.Items {
pi, err := newPdb(pdb)
if err != nil {
return nil, err
}
ps.pdbs = append(ps.pdbs, pi)
}
return ps, nil
}
// CanEvictPods returns true if every pod in the list is evictable: every PDB that selects one of the
// pods currently allows at least one disruption. The pods may not all be evictable simultaneously.
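// For example, if a PDB in a pod's namespace selects that pod and its status currently reports
// DisruptionsAllowed == 0, the pod cannot be evicted and that PDB's object key is returned.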
func (s *PDBLimits) CanEvictPods(pods []*v1.Pod) (client.ObjectKey, bool) {
for _, pod := range pods {
for _, pdb := range s.pdbs {
if pdb.name.Namespace == pod.ObjectMeta.Namespace {
if pdb.selector.Matches(labels.Set(pod.Labels)) {
if pdb.disruptionsAllowed == 0 {
return pdb.name, false
}
}
}
}
}
return client.ObjectKey{}, true
}
type pdbItem struct {
name client.ObjectKey
selector labels.Selector
disruptionsAllowed int32
}
func newPdb(pdb policyv1.PodDisruptionBudget) (*pdbItem, error) {
selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
if err != nil {
return nil, err
}
return &pdbItem{
name: client.ObjectKeyFromObject(&pdb),
selector: selector,
disruptionsAllowed: pdb.Status.DisruptionsAllowed,
}, nil
}
| 89 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"context"
"fmt"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)
// SingleMachineConsolidation is the consolidation controller that considers candidates one machine at a time.
type SingleMachineConsolidation struct {
consolidation
}
func NewSingleMachineConsolidation(clk clock.Clock, cluster *state.Cluster, kubeClient client.Client, provisioner *provisioning.Provisioner,
cp cloudprovider.CloudProvider, recorder events.Recorder) *SingleMachineConsolidation {
return &SingleMachineConsolidation{consolidation: makeConsolidation(clk, cluster, kubeClient, provisioner, cp, recorder)}
}
// ComputeCommand generates a deprovisioning command given deprovisionable machines
// nolint:gocyclo
func (c *SingleMachineConsolidation) ComputeCommand(ctx context.Context, candidates ...*Candidate) (Command, error) {
if c.cluster.Consolidated() {
return Command{}, nil
}
candidates, err := c.sortAndFilterCandidates(ctx, candidates)
if err != nil {
return Command{}, fmt.Errorf("sorting candidates, %w", err)
}
deprovisioningEligibleMachinesGauge.WithLabelValues(c.String()).Set(float64(len(candidates)))
v := NewValidation(consolidationTTL, c.clock, c.cluster, c.kubeClient, c.provisioner, c.cloudProvider, c.recorder)
for _, candidate := range candidates {
// compute a possible consolidation option
cmd, err := c.computeConsolidation(ctx, candidate)
if err != nil {
logging.FromContext(ctx).Errorf("computing consolidation %s", err)
continue
}
if cmd.Action() == NoOpAction {
continue
}
isValid, err := v.IsValid(ctx, cmd)
if err != nil {
logging.FromContext(ctx).Errorf("validating consolidation %s", err)
continue
}
if !isValid {
return Command{}, fmt.Errorf("command is no longer valid, %s", cmd)
}
return cmd, nil
}
return Command{}, nil
}
| 77 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// nolint:gosec
package deprovisioning_test
import (
"context"
"fmt"
"math"
"math/rand"
"sort"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
clock "k8s.io/utils/clock/testing"
. "knative.dev/pkg/logging/testing"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/deprovisioning"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/controllers/state/informer"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var env *test.Environment
var cluster *state.Cluster
var deprovisioningController *deprovisioning.Controller
var provisioner *provisioning.Provisioner
var cloudProvider *fake.CloudProvider
var nodeStateController controller.Controller
var machineStateController controller.Controller
var fakeClock *clock.FakeClock
var recorder *test.EventRecorder
var onDemandInstances []*cloudprovider.InstanceType
var mostExpensiveInstance *cloudprovider.InstanceType
var mostExpensiveOffering cloudprovider.Offering
var leastExpensiveInstance *cloudprovider.InstanceType
var leastExpensiveOffering cloudprovider.Offering
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Deprovisioning")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = settings.ToContext(ctx, test.Settings(settings.Settings{DriftEnabled: true}))
cloudProvider = fake.NewCloudProvider()
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
nodeStateController = informer.NewNodeController(env.Client, cluster)
machineStateController = informer.NewMachineController(env.Client, cluster)
recorder = test.NewEventRecorder()
provisioner = provisioning.NewProvisioner(env.Client, env.KubernetesInterface.CoreV1(), recorder, cloudProvider, cluster)
deprovisioningController = deprovisioning.NewController(fakeClock, env.Client, provisioner, cloudProvider, recorder, cluster)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = BeforeEach(func() {
cloudProvider.Reset()
cloudProvider.InstanceTypes = fake.InstanceTypesAssorted()
recorder.Reset() // Reset the events that we captured during the run
// ensure any waiters on our clock are allowed to proceed before resetting our clock time
for fakeClock.HasWaiters() {
fakeClock.Step(1 * time.Minute)
}
fakeClock.SetTime(time.Now())
cluster.Reset()
cluster.SetConsolidated(false)
// Reset Feature Flags to test defaults
ctx = settings.ToContext(ctx, test.Settings(settings.Settings{DriftEnabled: true}))
onDemandInstances = lo.Filter(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType, _ int) bool {
for _, o := range i.Offerings.Available() {
if o.CapacityType == v1alpha5.CapacityTypeOnDemand {
return true
}
}
return false
})
// Sort the instances by pricing from low to high
sort.Slice(onDemandInstances, func(i, j int) bool {
return cheapestOffering(onDemandInstances[i].Offerings).Price < cheapestOffering(onDemandInstances[j].Offerings).Price
})
leastExpensiveInstance = onDemandInstances[0]
leastExpensiveOffering = leastExpensiveInstance.Offerings[0]
mostExpensiveInstance = onDemandInstances[len(onDemandInstances)-1]
mostExpensiveOffering = mostExpensiveInstance.Offerings[0]
})
var _ = AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
cluster.Reset()
})
var _ = Describe("Consolidation State", func() {
It("should not reset consolidation state if consolidation hasn't run", func() {
// this assumes that the consolidation reset period is 5 minutes, which it is currently
_, err := deprovisioningController.Reconcile(ctx, reconcile.Request{})
Expect(err).ToNot(HaveOccurred())
Expect(cluster.Consolidated()).To(BeTrue())
fakeClock.Step(1 * time.Minute)
Expect(cluster.Consolidated()).To(BeTrue())
// reconciling now shouldn't set the last consolidated time to current time, as consolidation isn't actually
// running since it last ran 1 minute ago
_, err = deprovisioningController.Reconcile(ctx, reconcile.Request{})
Expect(err).ToNot(HaveOccurred())
// but advancing the clock another 4:30 (5:30 past the last run time) should cause Consolidated()
// to return false
fakeClock.Step(4*time.Minute + 30*time.Second)
Expect(cluster.Consolidated()).To(BeFalse())
})
})
var _ = Describe("Pod Eviction Cost", func() {
const standardPodCost = 1.0
It("should have a standard disruptionCost for a pod with no priority or disruptionCost specified", func() {
cost := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{})
Expect(cost).To(BeNumerically("==", standardPodCost))
})
It("should have a higher disruptionCost for a pod with a positive deletion disruptionCost", func() {
cost := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
v1.PodDeletionCost: "100",
}},
})
Expect(cost).To(BeNumerically(">", standardPodCost))
})
It("should have a lower disruptionCost for a pod with a negative deletion disruptionCost", func() {
cost := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
v1.PodDeletionCost: "-100",
}},
})
Expect(cost).To(BeNumerically("<", standardPodCost))
})
It("should have higher costs for higher deletion costs", func() {
cost1 := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
v1.PodDeletionCost: "101",
}},
})
cost2 := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
v1.PodDeletionCost: "100",
}},
})
cost3 := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
v1.PodDeletionCost: "99",
}},
})
Expect(cost1).To(BeNumerically(">", cost2))
Expect(cost2).To(BeNumerically(">", cost3))
})
It("should have a higher disruptionCost for a pod with a higher priority", func() {
cost := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
Spec: v1.PodSpec{Priority: ptr.Int32(1)},
})
Expect(cost).To(BeNumerically(">", standardPodCost))
})
It("should have a lower disruptionCost for a pod with a lower priority", func() {
cost := deprovisioning.GetPodEvictionCost(ctx, &v1.Pod{
Spec: v1.PodSpec{Priority: ptr.Int32(-1)},
})
Expect(cost).To(BeNumerically("<", standardPodCost))
})
})
var _ = Describe("Replace Nodes", func() {
It("can replace node", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
ExpectApplied(ctx, env.Client, rs, pod, node, machine, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
// consolidation won't delete the old machine until the new machine is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine)
// should create a new machine as there is a cheaper one that can hold the pod
machines := ExpectMachines(ctx, env.Client)
nodes := ExpectNodes(ctx, env.Client)
Expect(machines).To(HaveLen(1))
Expect(nodes).To(HaveLen(1))
// Expect that the new machine does not request the most expensive instance type
Expect(machines[0].Name).ToNot(Equal(machine.Name))
Expect(scheduling.NewNodeSelectorRequirements(machines[0].Spec.Requirements...).Has(v1.LabelInstanceTypeStable)).To(BeTrue())
Expect(scheduling.NewNodeSelectorRequirements(machines[0].Spec.Requirements...).Get(v1.LabelInstanceTypeStable).Has(mostExpensiveInstance.Name)).To(BeFalse())
// and delete the old one
ExpectNotFound(ctx, env.Client, machine, node)
})
It("can replace nodes, considers PDB", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
pdb := test.PodDisruptionBudget(test.PDBOptions{
Labels: labels,
MaxUnavailable: fromInt(0),
Status: &policyv1.PodDisruptionBudgetStatus{
ObservedGeneration: 1,
DisruptionsAllowed: 0,
CurrentHealthy: 1,
DesiredHealthy: 1,
ExpectedPods: 1,
},
})
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine, node, prov, pdb)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pods[0], node)
ExpectManualBinding(ctx, env.Client, pods[1], node)
ExpectManualBinding(ctx, env.Client, pods[2], node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
Expect(cluster.Consolidated()).To(BeTrue())
// we didn't create a new machine or delete the old one
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, machine)
})
It("can replace nodes, PDB namespace must match", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
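// create the PDB in a separate namespace so it does not cover the pod and therefore cannot block eviction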
namespace := test.Namespace()
pdb := test.PodDisruptionBudget(test.PDBOptions{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace.ObjectMeta.Name,
},
Labels: labels,
MaxUnavailable: fromInt(0),
Status: &policyv1.PodDisruptionBudgetStatus{
ObservedGeneration: 1,
DisruptionsAllowed: 0,
CurrentHealthy: 1,
DesiredHealthy: 1,
ExpectedPods: 1,
},
})
// bind pods to node
ExpectApplied(ctx, env.Client, rs, pod, machine, node, prov, namespace, pdb)
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
// consolidation won't delete the old node until the new node is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine)
// should create a new machine as there is a cheaper one that can hold the pod
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectNotFound(ctx, env.Client, machine, node)
})
It("can replace nodes, considers do-not-consolidate annotation", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
regularMachine, regularNode := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
annotatedMachine, annotatedNode := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.DoNotConsolidateNodeAnnotationKey: "true",
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], prov)
ExpectApplied(ctx, env.Client, regularMachine, regularNode, annotatedMachine, annotatedNode)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pods[0], regularNode)
ExpectManualBinding(ctx, env.Client, pods[1], regularNode)
ExpectManualBinding(ctx, env.Client, pods[2], annotatedNode)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{regularNode, annotatedNode}, []*v1alpha5.Machine{regularMachine, annotatedMachine})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, regularMachine)
// we should delete the non-annotated node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectNotFound(ctx, env.Client, regularMachine, regularNode)
})
It("won't replace node if any spot replacement is more expensive", func() {
currentInstance := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "current-on-demand",
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 0.5,
Available: false,
},
},
})
replacementInstance := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "potential-spot-replacement",
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeSpot,
Zone: "test-zone-1a",
Price: 1.0,
Available: true,
},
{
CapacityType: v1alpha5.CapacityTypeSpot,
Zone: "test-zone-1b",
Price: 0.2,
Available: true,
},
{
CapacityType: v1alpha5.CapacityTypeSpot,
Zone: "test-zone-1c",
Price: 0.4,
Available: true,
},
},
})
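// the spot offering in test-zone-1a (1.0) is more expensive than the current on-demand node (0.5), so
// consolidation must not replace the node even though other zones offer cheaper spot capacity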
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
currentInstance,
replacementInstance,
}
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: currentInstance.Name,
v1alpha5.LabelCapacityType: currentInstance.Offerings[0].CapacityType,
v1.LabelTopologyZone: currentInstance.Offerings[0].Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
ExpectApplied(ctx, env.Client, rs, pod, machine, node, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
Expect(cluster.Consolidated()).To(BeTrue())
// Expect to not create or delete more machines
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, machine)
})
It("won't replace on-demand node if on-demand replacement is more expensive", func() {
currentInstance := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "current-on-demand",
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 0.5,
Available: false,
},
},
})
replacementInstance := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "on-demand-replacement",
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 0.6,
Available: true,
},
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1b",
Price: 0.6,
Available: true,
},
{
CapacityType: v1alpha5.CapacityTypeSpot,
Zone: "test-zone-1b",
Price: 0.2,
Available: true,
},
{
CapacityType: v1alpha5.CapacityTypeSpot,
Zone: "test-zone-1c",
Price: 0.3,
Available: true,
},
},
})
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
currentInstance,
replacementInstance,
}
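// the cheapest on-demand replacement offering (0.6) costs more than the current node (0.5); the cheaper
// spot offerings are excluded by the provisioner's on-demand requirement below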
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
// provisioner should require on-demand instance for this test case
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeOnDemand},
},
},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: currentInstance.Name,
v1alpha5.LabelCapacityType: currentInstance.Offerings[0].CapacityType,
v1.LabelTopologyZone: currentInstance.Offerings[0].Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
ExpectApplied(ctx, env.Client, rs, pod, machine, node, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
Expect(cluster.Consolidated()).To(BeTrue())
// Expect to not create or delete more machines
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, machine)
})
It("waits for node deletion to finish", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
machine, node := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
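// the finalizer blocks machine deletion so the test can observe consolidation waiting for it to finish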
Finalizers: []string{"unit-test.com/block-deletion"},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("32")},
},
})
ExpectApplied(ctx, env.Client, rs, pod, machine, node, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
// consolidation won't delete the old node until the new node is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
var consolidationFinished atomic.Bool
go func() {
defer GinkgoRecover()
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
Expect(cluster.Consolidated()).To(BeFalse())
consolidationFinished.Store(true)
}()
wg.Wait()
// machine should still exist
ExpectExists(ctx, env.Client, machine)
// and consolidation should still be running, waiting on the machine's deletion
Expect(consolidationFinished.Load()).To(BeFalse())
// fetch the latest machine object and remove the finalizer
machine = ExpectExists(ctx, env.Client, machine)
ExpectFinalizersRemoved(ctx, env.Client, machine)
// consolidation should complete now that the finalizer on the machine is gone and it has
// actually been deleted
Eventually(consolidationFinished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine)
ExpectNotFound(ctx, env.Client, machine, node)
// Expect that the new machine was created and that it's different from the original
machines := ExpectMachines(ctx, env.Client)
nodes := ExpectNodes(ctx, env.Client)
Expect(machines).To(HaveLen(1))
Expect(nodes).To(HaveLen(1))
Expect(machines[0].Name).ToNot(Equal(machine.Name))
Expect(nodes[0].Name).ToNot(Equal(node.Name))
})
})
var _ = Describe("Delete Node", func() {
var prov *v1alpha5.Provisioner
var machine1, machine2 *v1alpha5.Machine
var node1, node2 *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
machine1, node1 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
machine2, node2 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("can delete nodes", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine2)
// we don't need a new node, but we should evict everything off node2, which only has a single pod
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
// and delete the old one
ExpectNotFound(ctx, env.Client, machine2, node2)
})
It("can delete nodes, considers PDB", func() {
var nl v1.NodeList
Expect(env.Client.List(ctx, &nl)).To(Succeed())
Expect(nl.Items).To(HaveLen(0))
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
// only pod[2] is covered by the PDB
pods[2].Labels = labels
pdb := test.PodDisruptionBudget(test.PDBOptions{
Labels: labels,
MaxUnavailable: fromInt(0),
Status: &policyv1.PodDisruptionBudgetStatus{
ObservedGeneration: 1,
DisruptionsAllowed: 0,
CurrentHealthy: 1,
DesiredHealthy: 1,
ExpectedPods: 1,
},
})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov, pdb)
// two pods on node 1
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
// one on node 2, but it has a PDB with zero disruptions allowed
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// we don't need a new node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
// but we expect to delete the machine with more pods (node1) as the pod on machine2 has a PDB preventing
// eviction
ExpectNotFound(ctx, env.Client, machine1, node1)
})
It("can delete nodes, considers do-not-evict", func() {
// create our RS, so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
// only pod[2] has a do-not-evict annotation
pods[2].Annotations = map[string]string{
v1alpha5.DoNotEvictPodAnnotationKey: "true",
}
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
// two pods on node 1
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
// one on node 2, but it has a do-not-evict annotation
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// we don't need a new node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
// but we expect to delete the machine with more pods (machine1) as the pod on machine2 has a do-not-evict annotation
ExpectNotFound(ctx, env.Client, machine1, node1)
})
It("can delete nodes, evicts pods without an ownerRef", func() {
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
// pod[2] is a stand-alone (non-ReplicaSet) pod
pods[2].OwnerReferences = nil
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
// two pods on node 1
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
// one on node 2, but it's a standalone pod
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine2)
// we don't need a new node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
// but we expect to delete the machine with the fewest pods (machine 2) even though the pod has no ownerRefs
// and will not be recreated
ExpectNotFound(ctx, env.Client, machine2, node2)
})
It("won't delete node if it would require pods to schedule on an un-initialized node", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines, intentionally leaving node1 un-initialized
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(machine1))
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node2}, []*v1alpha5.Machine{machine2})
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// shouldn't delete the node
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(2))
// Expect Unconsolidatable events to be fired
evts := recorder.Events()
_, ok := lo.Find(evts, func(e events.Event) bool {
return strings.Contains(e.Message, "not all pods would schedule")
})
Expect(ok).To(BeTrue())
_, ok = lo.Find(evts, func(e events.Event) bool {
return strings.Contains(e.Message, "would schedule against a non-initialized node")
})
Expect(ok).To(BeTrue())
})
It("should consider initialized nodes before un-initialized nodes", func() {
defaultInstanceType := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "default-instance-type",
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("3Gi"),
v1.ResourcePods: resource.MustParse("110"),
},
})
smallInstanceType := fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "small-instance-type",
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourcePods: resource.MustParse("10"),
},
})
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
defaultInstanceType,
smallInstanceType,
}
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
podCount := 100
pods := test.Pods(podCount, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
})
ExpectApplied(ctx, env.Client, rs, prov)
// Set up 100 machines/nodes, with a single machine/node that is initialized
elem := rand.Intn(100)
for i := 0; i < podCount; i++ {
m, n := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: defaultInstanceType.Name,
v1alpha5.LabelCapacityType: defaultInstanceType.Offerings[0].CapacityType,
v1.LabelTopologyZone: defaultInstanceType.Offerings[0].Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("3Gi"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
ExpectApplied(ctx, env.Client, pods[i], m, n)
ExpectManualBinding(ctx, env.Client, pods[i], n)
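// only the randomly chosen machine/node is initialized; the rest are registered with cluster state
// but left un-initialized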
if i == elem {
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{n}, []*v1alpha5.Machine{m})
} else {
ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(m))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(n))
}
}
// Create a pod and machine/node that will eventually be scheduled onto the initialized node
consolidatableMachine, consolidatableNode := test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: smallInstanceType.Name,
v1alpha5.LabelCapacityType: smallInstanceType.Offerings[0].CapacityType,
v1.LabelTopologyZone: smallInstanceType.Offerings[0].Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
// create a new RS so we can link a pod to it
rs = test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
consolidatablePod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
})
ExpectApplied(ctx, env.Client, consolidatableMachine, consolidatableNode, consolidatablePod)
ExpectManualBinding(ctx, env.Client, consolidatablePod, consolidatableNode)
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{consolidatableNode}, []*v1alpha5.Machine{consolidatableMachine})
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
ExpectMachinesCascadeDeletion(ctx, env.Client, consolidatableMachine)
// Expect no events that state that the pods would schedule against a non-initialized node
evts := recorder.Events()
_, ok := lo.Find(evts, func(e events.Event) bool {
return strings.Contains(e.Message, "would schedule against a non-initialized node")
})
Expect(ok).To(BeFalse())
// the small-instance machine should be consolidated away, with its pod moving to the initialized node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(100))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(100))
ExpectNotFound(ctx, env.Client, consolidatableMachine, consolidatableNode)
})
})
var _ = Describe("Node Lifetime Consideration", func() {
var prov *v1alpha5.Provisioner
var machine1, machine2 *v1alpha5.Machine
var node1, node2 *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{
Enabled: ptr.Bool(true),
},
TTLSecondsUntilExpired: ptr.Int64(3),
})
machine1, node1 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
machine2, node2 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("should consider node lifetime remaining when calculating disruption cost", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], prov)
ExpectApplied(ctx, env.Client, machine1, node1) // ensure node1 is the oldest node
time.Sleep(2 * time.Second) // this sleep is unfortunate, but necessary. The creation time is from etcd, and we can't mock it, so we
// need to sleep to force the second node to be created a bit after the first node.
ExpectApplied(ctx, env.Client, machine2, node2)
// two pods on node 1, one on node 2
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.SetTime(time.Now())
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// node1 has more pods, so it would normally not be picked for consolidation; however, it has very little
// lifetime remaining, so it should be deleted
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectNotFound(ctx, env.Client, machine1, node1)
})
})
var _ = Describe("Topology Consideration", func() {
var prov *v1alpha5.Provisioner
var zone1Machine, zone2Machine, zone3Machine *v1alpha5.Machine
var zone1Node, zone2Node, zone3Node *v1.Node
var oldMachineNames sets.String
BeforeEach(func() {
testZone1Instance := leastExpensiveInstanceWithZone("test-zone-1")
testZone2Instance := mostExpensiveInstanceWithZone("test-zone-2")
testZone3Instance := leastExpensiveInstanceWithZone("test-zone-3")
prov = test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
zone1Machine, zone1Node = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelTopologyZone: "test-zone-1",
v1.LabelInstanceTypeStable: testZone1Instance.Name,
v1alpha5.LabelCapacityType: testZone1Instance.Offerings[0].CapacityType,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("1")},
},
})
zone2Machine, zone2Node = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelTopologyZone: "test-zone-2",
v1.LabelInstanceTypeStable: testZone2Instance.Name,
v1alpha5.LabelCapacityType: testZone2Instance.Offerings[0].CapacityType,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("1")},
},
})
zone3Machine, zone3Node = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelTopologyZone: "test-zone-3",
v1.LabelInstanceTypeStable: testZone3Instance.Name,
v1alpha5.LabelCapacityType: testZone1Instance.Offerings[0].CapacityType,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("1")},
},
})
oldMachineNames = sets.NewString(zone1Machine.Name, zone2Machine.Name, zone3Machine.Name)
})
It("can replace node maintaining zonal topology spread", func() {
labels := map[string]string{
"app": "test-zonal-spread",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
tsc := v1.TopologySpreadConstraint{
MaxSkew: 1,
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
}
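// a zonal topology spread with maxSkew 1 means the replacement node must land in the same zone as the
// node it replaces to keep the skew satisfied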
pods := test.Pods(4, test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("1")}},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{tsc},
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], zone1Machine, zone1Node, zone2Machine, zone2Node, zone3Machine, zone3Node, prov)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], zone1Node)
ExpectManualBinding(ctx, env.Client, pods[1], zone2Node)
ExpectManualBinding(ctx, env.Client, pods[2], zone3Node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{zone1Node, zone2Node, zone3Node}, []*v1alpha5.Machine{zone1Machine, zone2Machine, zone3Machine})
ExpectSkew(ctx, env.Client, "default", &tsc).To(ConsistOf(1, 1, 1))
fakeClock.Step(10 * time.Minute)
// consolidation won't delete the old node until the new node is ready
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, zone2Machine)
// should create a new node as there is a cheaper one that can hold the pod
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(3))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(3))
ExpectNotFound(ctx, env.Client, zone2Machine, zone2Node)
// Find the new node associated with the machine
newMachine, ok := lo.Find(ExpectMachines(ctx, env.Client), func(m *v1alpha5.Machine) bool {
return !oldMachineNames.Has(m.Name)
})
Expect(ok).To(BeTrue())
newNode, ok := lo.Find(ExpectNodes(ctx, env.Client), func(n *v1.Node) bool {
return newMachine.Status.ProviderID == n.Spec.ProviderID
})
Expect(ok).To(BeTrue())
// we need to emulate the replicaset controller and bind a new pod to the newly created node
ExpectApplied(ctx, env.Client, pods[3])
ExpectManualBinding(ctx, env.Client, pods[3], newNode)
// we should maintain our skew, the new node must be in the same zone as the old node it replaced
ExpectSkew(ctx, env.Client, "default", &tsc).To(ConsistOf(1, 1, 1))
})
It("won't delete node if it would violate pod anti-affinity", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("1")}},
PodAntiRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
TopologyKey: v1.LabelHostname,
},
},
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
// Make the Zone 2 instance also the least expensive instance
zone2Instance := leastExpensiveInstanceWithZone("test-zone-2")
zone2Node.Labels = lo.Assign(zone2Node.Labels, map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelTopologyZone: "test-zone-2",
v1.LabelInstanceTypeStable: zone2Instance.Name,
v1alpha5.LabelCapacityType: zone2Instance.Offerings[0].CapacityType,
})
zone2Machine.Labels = lo.Assign(zone2Machine.Labels, map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelTopologyZone: "test-zone-2",
v1.LabelInstanceTypeStable: zone2Instance.Name,
v1alpha5.LabelCapacityType: zone2Instance.Offerings[0].CapacityType,
})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], zone1Machine, zone1Node, zone2Machine, zone2Node, zone3Machine, zone3Node, prov)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], zone1Node)
ExpectManualBinding(ctx, env.Client, pods[1], zone2Node)
ExpectManualBinding(ctx, env.Client, pods[2], zone3Node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{zone1Node, zone2Node, zone3Node}, []*v1alpha5.Machine{zone1Machine, zone2Machine, zone3Machine})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// our nodes are already the cheapest available, so we can't replace them. If we delete, it would
// violate the anti-affinity rule, so we can't do anything.
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(3))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(3))
ExpectExists(ctx, env.Client, zone1Machine)
ExpectExists(ctx, env.Client, zone2Machine)
ExpectExists(ctx, env.Client, zone3Machine)
})
})
var _ = Describe("Empty Nodes", func() {
var prov *v1alpha5.Provisioner
var machine1, machine2 *v1alpha5.Machine
var node1, node2 *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)}})
machine1, node1 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
machine2, node2 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("can delete empty nodes with consolidation", func() {
ExpectApplied(ctx, env.Client, machine1, node1, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1}, []*v1alpha5.Machine{machine1})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// we should delete the empty node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(0))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(0))
ExpectNotFound(ctx, env.Client, machine1, node1)
})
It("can delete multiple empty nodes with consolidation", func() {
ExpectApplied(ctx, env.Client, machine1, node1, machine2, node2, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.Step(10 * time.Minute)
wg := sync.WaitGroup{}
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1, machine2)
// we should delete the empty nodes
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(0))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(0))
ExpectNotFound(ctx, env.Client, machine1)
ExpectNotFound(ctx, env.Client, machine2)
})
It("can delete empty nodes with TTLSecondsAfterEmpty with the emptiness timestamp", func() {
prov = test.Provisioner(test.ProvisionerOptions{TTLSecondsAfterEmpty: ptr.Int64(10)})
// Update the machine and node to be "owned" by the new provisioner and the node
// to be marked as empty
machine1.Labels = lo.Assign(machine1.Labels, map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
})
node1.Labels = lo.Assign(node1.Labels, map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
})
node1.Annotations = lo.Assign(node1.Annotations, map[string]string{
v1alpha5.EmptinessTimestampAnnotationKey: fakeClock.Now().Format(time.RFC3339),
})
ExpectApplied(ctx, env.Client, prov, machine1, node1)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1}, []*v1alpha5.Machine{machine1})
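// advance the clock well past TTLSecondsAfterEmpty (10s) so the empty node is eligible for deletion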
fakeClock.Step(10 * time.Minute)
wg := sync.WaitGroup{}
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// we should delete the empty node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(0))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(0))
ExpectNotFound(ctx, env.Client, machine1, node1)
})
It("considers pending pods when consolidating", func() {
machine1, node1 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("128"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
// there is a pending pod that should land on the node
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
},
},
})
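// a second pending pod is so large (125 CPU) that only this 128-CPU node could fit it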
unsched := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("125"),
},
},
})
ExpectApplied(ctx, env.Client, machine1, node1, pod, unsched, prov)
// bind one of the pods to the node
ExpectManualBinding(ctx, env.Client, pod, node1)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1}, []*v1alpha5.Machine{machine1})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// we don't need any new nodes and consolidation should notice the huge pending pod that needs the large
// node to schedule, which prevents the large expensive node from being replaced
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, machine1)
})
})
var _ = Describe("Consolidation TTL", func() {
var prov *v1alpha5.Provisioner
var machine1, machine2 *v1alpha5.Machine
var node1, node2 *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)}})
machine1, node1 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
machine2, node2 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("should not deprovision nodes that receive blocking pods during the TTL", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
}})
noEvictPod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
Annotations: map[string]string{v1alpha5.DoNotEvictPodAnnotationKey: "true"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
}})
ExpectApplied(ctx, env.Client, machine1, node1, prov, pod, noEvictPod)
ExpectManualBinding(ctx, env.Client, pod, node1)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1}, []*v1alpha5.Machine{machine1})
var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
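// the reconcile is expected to return an error once validation runs, because the do-not-evict pod
// bound during the TTL wait blocks the proposed action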
_, err := deprovisioningController.Reconcile(ctx, reconcile.Request{})
Expect(err).To(HaveOccurred())
}()
// wait for the deprovisioningController to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*10).Should(BeTrue())
// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())
// and the node should not be deleted yet
ExpectExists(ctx, env.Client, node1)
// bind the do-not-evict pod to the node during the TTL wait so it blocks the pending deprovisioning
ExpectManualBinding(ctx, env.Client, noEvictPod, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
// advance the clock so that the timeout expires
fakeClock.Step(31 * time.Second)
// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// nothing should be removed since the node now has a do-not-evict pod blocking deprovisioning
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, node1)
})
It("should wait for the node TTL for empty nodes before consolidating", func() {
ExpectApplied(ctx, env.Client, machine1, node1, prov)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1}, []*v1alpha5.Machine{machine1})
var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
}()
// wait for the controller to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*10).Should(BeTrue())
// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())
// and the node should not be deleted yet
ExpectExists(ctx, env.Client, machine1)
// advance the clock so that the timeout expires
fakeClock.Step(31 * time.Second)
// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// machine should be deleted after the TTL due to emptiness
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(0))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(0))
ExpectNotFound(ctx, env.Client, machine1, node1)
})
It("should wait for the node TTL for non-empty nodes before consolidating", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(rs), rs)).To(Succeed())
// assign the machines to the least expensive offering so only one of them gets deleted
machine1.Labels = lo.Assign(machine1.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
node1.Labels = lo.Assign(node1.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
machine2.Labels = lo.Assign(machine2.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
node2.Labels = lo.Assign(node2.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer wg.Done()
defer finished.Store(true)
ExpectReconcileSucceeded(ctx, deprovisioningController, types.NamespacedName{})
}()
// wait for the controller to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*10).Should(BeTrue())
// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())
// and the node should not be deleted yet
ExpectExists(ctx, env.Client, machine1)
ExpectExists(ctx, env.Client, machine2)
// advance the clock so that the timeout expires
fakeClock.Step(31 * time.Second)
// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine2)
// machine2 should be deleted after the TTL since its pod can be moved to node1
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectNotFound(ctx, env.Client, machine2, node2)
})
It("should not consolidate if the action becomes invalid during the node TTL wait", func() {
pod := test.Pod()
ExpectApplied(ctx, env.Client, machine1, node1, prov, pod)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1}, []*v1alpha5.Machine{machine1})
var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
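// the reconcile is expected to return an error because the node is no longer empty by the time
// validation runs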
_, err := deprovisioningController.Reconcile(ctx, reconcile.Request{})
Expect(err).To(HaveOccurred())
}()
// wait for the deprovisioningController to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*10).Should(BeTrue())
// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())
// and the node should not be deleted yet
ExpectExists(ctx, env.Client, machine1)
// make the node non-empty by binding a pod to it
ExpectManualBinding(ctx, env.Client, pod, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
// advance the clock so that the timeout expires
fakeClock.Step(31 * time.Second)
// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// nothing should be removed since the node is no longer empty
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, machine1)
})
})
var _ = Describe("Parallelization", func() {
var prov *v1alpha5.Provisioner
var machine *v1alpha5.Machine
var node *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)}})
machine, node = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("should schedule an additional node when receiving pending pods while consolidating", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pod := test.Pod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
node.Finalizers = []string{"karpenter.sh/test-finalizer"}
machine.Finalizers = []string{"karpenter.sh/test-finalizer"}
ExpectApplied(ctx, env.Client, rs, pod, machine, node, prov)
// bind pods to node
ExpectManualBinding(ctx, env.Client, pod, node)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node}, []*v1alpha5.Machine{machine})
fakeClock.Step(10 * time.Minute)
// Run the processing loop in parallel in the background with environment context
var wg sync.WaitGroup
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectTriggerVerifyAction(&wg)
go func() {
defer GinkgoRecover()
_, _ = deprovisioningController.Reconcile(ctx, reconcile.Request{})
}()
wg.Wait()
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(2))
// Add a new pending pod that should schedule while node is not yet deleted
pod = test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, provisioner, pod)
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(2))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(2))
ExpectScheduled(ctx, env.Client, pod)
})
It("should not consolidate a node that is launched for pods on a deleting node", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
prov := test.Provisioner(test.ProvisionerOptions{
Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)},
})
podOpts := test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
}
var pods []*v1.Pod
for i := 0; i < 5; i++ {
pod := test.UnschedulablePod(podOpts)
pods = append(pods, pod)
}
ExpectApplied(ctx, env.Client, rs, prov)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, provisioner, lo.Map(pods, func(p *v1.Pod, _ int) *v1.Pod { return p.DeepCopy() })...)
machines := ExpectMachines(ctx, env.Client)
Expect(machines).To(HaveLen(1))
nodes := ExpectNodes(ctx, env.Client)
Expect(nodes).To(HaveLen(1))
// Update cluster state with new node
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(nodes[0]))
// Mark the node for deletion and re-trigger reconciliation
oldNodeName := nodes[0].Name
cluster.MarkForDeletion(nodes[0].Name)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, provisioner, lo.Map(pods, func(p *v1.Pod, _ int) *v1.Pod { return p.DeepCopy() })...)
// Make sure that the cluster state is aware of the current node state
nodes = ExpectNodes(ctx, env.Client)
Expect(nodes).To(HaveLen(2))
newNode, _ := lo.Find(nodes, func(n *v1.Node) bool { return n.Name != oldNodeName })
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, nodes, nil)
// Wait for the nomination cache to expire
time.Sleep(time.Second * 11)
// Re-create the pods to re-bind them
for i := 0; i < 2; i++ {
ExpectDeleted(ctx, env.Client, pods[i])
pod := test.UnschedulablePod(podOpts)
ExpectApplied(ctx, env.Client, pod)
ExpectManualBinding(ctx, env.Client, pod, newNode)
}
// Trigger a reconciliation run which should take into account the deleting node
// consolidation shouldn't trigger additional actions
fakeClock.Step(10 * time.Minute)
result, err := deprovisioningController.Reconcile(ctx, reconcile.Request{})
Expect(err).ToNot(HaveOccurred())
Expect(result.RequeueAfter).To(BeNumerically(">", 0))
})
})
var _ = Describe("Multi-Node Consolidation", func() {
var prov *v1alpha5.Provisioner
var machine1, machine2, machine3 *v1alpha5.Machine
var node1, node2, node3 *v1.Node
BeforeEach(func() {
prov = test.Provisioner(test.ProvisionerOptions{Consolidation: &v1alpha5.Consolidation{Enabled: ptr.Bool(true)}})
machine1, node1 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
machine2, node2 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
machine3, node3 = test.MachineAndNode(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: prov.Name,
v1.LabelInstanceTypeStable: mostExpensiveInstance.Name,
v1alpha5.LabelCapacityType: mostExpensiveOffering.CapacityType,
v1.LabelTopologyZone: mostExpensiveOffering.Zone,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
},
})
})
It("can merge 3 nodes into 1", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, machine3, node3, prov)
ExpectMakeNodesInitialized(ctx, env.Client, node1, node2, node3)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node2)
ExpectManualBinding(ctx, env.Client, pods[2], node3)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2, node3}, []*v1alpha5.Machine{machine1, machine2, machine3})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1, machine2, machine3)
// three machines should be replaced with a single machine
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
ExpectNotFound(ctx, env.Client, machine1, node1, machine2, node2, machine3, node3)
})
It("won't merge 2 nodes into 1 of the same type", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
// Make the machines the least expensive instance type and make them of the same type
machine1.Labels = lo.Assign(machine1.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
node1.Labels = lo.Assign(node1.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
		machine2.Labels = lo.Assign(machine2.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
node2.Labels = lo.Assign(node2.Labels, map[string]string{
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1alpha5.LabelCapacityType: leastExpensiveOffering.CapacityType,
v1.LabelTopologyZone: leastExpensiveOffering.Zone,
})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
ExpectMakeNodesInitialized(ctx, env.Client, node1, node2)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node2)
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
fakeClock.Step(10 * time.Minute)
var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1)
// We have [cheap-node, cheap-node] which multi-node consolidation could consolidate via
// [delete cheap-node, delete cheap-node, launch cheap-node]. This isn't the best method though
// as we should instead just delete one of the nodes instead of deleting both and launching a single
// identical replacement. This test verifies the filterOutSameType function from multi-node consolidation
// works to ensure we perform the least-disruptive action.
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
// should have just deleted the node with the fewest pods
ExpectNotFound(ctx, env.Client, machine1, node1)
// and left the other node alone
ExpectExists(ctx, env.Client, machine2)
ExpectExists(ctx, env.Client, node2)
})
It("should wait for the node TTL for non-empty nodes before consolidating (multi-node)", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, prov)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node1)
ExpectManualBinding(ctx, env.Client, pods[2], node2)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2}, []*v1alpha5.Machine{machine1, machine2})
var wg sync.WaitGroup
ExpectMakeNewMachinesReady(ctx, env.Client, &wg, cluster, cloudProvider, 1)
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
}()
// wait for the controller to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*5).Should(BeTrue())
// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())
// and the node should not be deleted yet
ExpectExists(ctx, env.Client, machine1)
ExpectExists(ctx, env.Client, machine2)
// advance the clock so that the timeout expires
fakeClock.Step(31 * time.Second)
// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1, machine2)
// should launch a single smaller replacement node
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(1))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(1))
// and delete the two large ones
ExpectNotFound(ctx, env.Client, machine1, node1, machine2, node2)
})
It("should continue to single machine consolidation when multi-machine consolidation fails validation after the node ttl", func() {
labels := map[string]string{
"app": "test",
}
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(3, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
}}})
ExpectApplied(ctx, env.Client, rs, pods[0], pods[1], pods[2], machine1, node1, machine2, node2, machine3, node3, prov)
// bind pods to nodes
ExpectManualBinding(ctx, env.Client, pods[0], node1)
ExpectManualBinding(ctx, env.Client, pods[1], node2)
ExpectManualBinding(ctx, env.Client, pods[2], node3)
// inform cluster state about nodes and machines
ExpectMakeInitializedAndStateUpdated(ctx, env.Client, nodeStateController, machineStateController, []*v1.Node{node1, node2, node3}, []*v1alpha5.Machine{machine1, machine2, machine3})
var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
ExpectReconcileSucceeded(ctx, deprovisioningController, client.ObjectKey{})
}()
// wait for the controller to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*5).Should(BeTrue())
// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())
// and the node should not be deleted yet
ExpectExists(ctx, env.Client, machine1)
ExpectExists(ctx, env.Client, machine2)
ExpectExists(ctx, env.Client, machine3)
var extraPods []*v1.Pod
for i := 0; i < 2; i++ {
extraPods = append(extraPods, test.Pod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI)},
},
}))
}
ExpectApplied(ctx, env.Client, extraPods[0], extraPods[1])
		// bind the extra pods to node1 and node2 to make the consolidation decision invalid
// we bind to 2 nodes so we can deterministically expect that node3 is consolidated in
// single machine consolidation
ExpectManualBinding(ctx, env.Client, extraPods[0], node1)
ExpectManualBinding(ctx, env.Client, extraPods[1], node2)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node2))
// advance the clock so that the timeout expires for multi-machine consolidation
fakeClock.Step(31 * time.Second)
// wait for the controller to block on the validation timeout for single machine consolidation
Eventually(fakeClock.HasWaiters, time.Second*5).Should(BeTrue())
// advance the clock so that the timeout expires for single machine consolidation
fakeClock.Step(31 * time.Second)
// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()
// Cascade any deletion of the machine to the node
ExpectMachinesCascadeDeletion(ctx, env.Client, machine1, machine2, machine3)
// should have 2 nodes after single machine consolidation deletes one
Expect(ExpectMachines(ctx, env.Client)).To(HaveLen(2))
Expect(ExpectNodes(ctx, env.Client)).To(HaveLen(2))
// and delete node3 in single machine consolidation
ExpectNotFound(ctx, env.Client, machine3, node3)
})
})
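// leastExpensiveInstanceWithZone returns the first on-demand instance type that has an offering in the passed zone.
// Assuming onDemandInstances is kept sorted by ascending price elsewhere in this suite, this is the cheapest matching
// type; if no type offers the zone, it falls back to the last (most expensive) entry.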
func leastExpensiveInstanceWithZone(zone string) *cloudprovider.InstanceType {
for _, elem := range onDemandInstances {
if hasZone(elem.Offerings, zone) {
return elem
}
}
return onDemandInstances[len(onDemandInstances)-1]
}
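// mostExpensiveInstanceWithZone walks onDemandInstances from the end (assumed sorted by ascending price) and returns
// the priciest instance type that has an offering in the passed zone, falling back to the first entry if none matches.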
func mostExpensiveInstanceWithZone(zone string) *cloudprovider.InstanceType {
for i := len(onDemandInstances) - 1; i >= 0; i-- {
elem := onDemandInstances[i]
if hasZone(elem.Offerings, zone) {
return elem
}
}
return onDemandInstances[0]
}
// hasZone checks whether any of the passed offerings have a zone matching
// the passed zone
func hasZone(ofs []cloudprovider.Offering, zone string) bool {
for _, elem := range ofs {
if elem.Zone == zone {
return true
}
}
return false
}
func fromInt(i int) *intstr.IntOrString {
v := intstr.FromInt(i)
return &v
}
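// ExpectTriggerVerifyAction polls for up to ~2.5s until the deprovisioning controller is blocked waiting on the fake
// clock, then steps the clock forward 45 seconds so the consolidation validation TTL elapses and the action proceeds.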
func ExpectTriggerVerifyAction(wg *sync.WaitGroup) {
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 10; i++ {
time.Sleep(250 * time.Millisecond)
if fakeClock.HasWaiters() {
break
}
}
fakeClock.Step(45 * time.Second)
}()
}
// ExpectNewMachinesDeleted simulates the machines being created and then removed, similar to what would happen
// during an ICE error on the created machine
func ExpectNewMachinesDeleted(ctx context.Context, c client.Client, wg *sync.WaitGroup, numNewMachines int) {
existingMachines := ExpectMachines(ctx, c)
existingMachineNames := sets.NewString(lo.Map(existingMachines, func(m *v1alpha5.Machine, _ int) string {
return m.Name
})...)
wg.Add(1)
go func() {
machinesDeleted := 0
ctx, cancel := context.WithTimeout(ctx, time.Second*30) // give up after 30s
defer GinkgoRecover()
defer wg.Done()
defer cancel()
for {
select {
case <-time.After(50 * time.Millisecond):
machineList := &v1alpha5.MachineList{}
if err := c.List(ctx, machineList); err != nil {
continue
}
for i := range machineList.Items {
m := &machineList.Items[i]
if existingMachineNames.Has(m.Name) {
continue
}
ExpectWithOffset(1, client.IgnoreNotFound(c.Delete(ctx, m))).To(Succeed())
machinesDeleted++
if machinesDeleted == numNewMachines {
return
}
}
case <-ctx.Done():
Fail(fmt.Sprintf("waiting for machines to be deleted, %s", ctx.Err()))
}
}
}()
}
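// ExpectMakeNewMachinesReady watches for machines created after this call (any machine not in the pre-existing set),
// simulates their deployment via the cloud provider, and marks both the machine and its node initialized until
// numNewMachines have been made ready or the 10s timeout expires.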
func ExpectMakeNewMachinesReady(ctx context.Context, c client.Client, wg *sync.WaitGroup, cluster *state.Cluster,
cloudProvider cloudprovider.CloudProvider, numNewMachines int) {
existingMachines := ExpectMachines(ctx, c)
existingMachineNames := sets.NewString(lo.Map(existingMachines, func(m *v1alpha5.Machine, _ int) string {
return m.Name
})...)
wg.Add(1)
go func() {
machinesMadeReady := 0
ctx, cancel := context.WithTimeout(ctx, time.Second*10) // give up after 10s
defer GinkgoRecover()
defer wg.Done()
defer cancel()
for {
select {
case <-time.After(50 * time.Millisecond):
machineList := &v1alpha5.MachineList{}
if err := c.List(ctx, machineList); err != nil {
continue
}
for i := range machineList.Items {
m := &machineList.Items[i]
if existingMachineNames.Has(m.Name) {
continue
}
m, n := ExpectMachineDeployedWithOffset(1, ctx, c, cluster, cloudProvider, m)
ExpectMakeMachinesInitializedWithOffset(1, ctx, c, m)
ExpectMakeNodesInitializedWithOffset(1, ctx, c, n)
machinesMadeReady++
existingMachineNames.Insert(m.Name)
// did we make all the nodes ready that we expected?
if machinesMadeReady == numNewMachines {
return
}
}
case <-ctx.Done():
Fail(fmt.Sprintf("waiting for machines to be ready, %s", ctx.Err()))
}
}
}()
}
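// ExpectMakeInitializedAndStateUpdated marks the passed nodes and machines as initialized and then reconciles each of
// them through the state controllers so that cluster state reflects their readiness.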
func ExpectMakeInitializedAndStateUpdated(ctx context.Context, c client.Client, nodeStateController, machineStateController controller.Controller, nodes []*v1.Node, machines []*v1alpha5.Machine) {
ExpectMakeNodesInitializedWithOffset(1, ctx, c, nodes...)
ExpectMakeMachinesInitializedWithOffset(1, ctx, c, machines...)
// Inform cluster state about node and machine readiness
for _, n := range nodes {
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(n))
}
for _, m := range machines {
ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(m))
}
}
// cheapestOffering grabs the cheapest offering from the passed offerings
func cheapestOffering(ofs []cloudprovider.Offering) cloudprovider.Offering {
offering := cloudprovider.Offering{Price: math.MaxFloat64}
for _, of := range ofs {
if of.Price < offering.Price {
offering = of
}
}
return offering
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"bytes"
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
deprovisioningevents "github.com/aws/karpenter-core/pkg/controllers/deprovisioning/events"
"github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)
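// Deprovisioner is implemented by each deprovisioning mechanism. ShouldDeprovision filters the candidate nodes a
// mechanism is willing to act on, and ComputeCommand produces the termination/replacement action for those candidates.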
type Deprovisioner interface {
ShouldDeprovision(context.Context, *Candidate) bool
ComputeCommand(context.Context, ...*Candidate) (Command, error)
String() string
}
type CandidateFilter func(context.Context, *Candidate) bool
// Candidate is a state.StateNode that we are considering for deprovisioning along with extra information to be used in
// making that determination
type Candidate struct {
*state.StateNode
instanceType *cloudprovider.InstanceType
provisioner *v1alpha5.Provisioner
zone string
capacityType string
disruptionCost float64
pods []*v1.Pod
}
//nolint:gocyclo
func NewCandidate(ctx context.Context, kubeClient client.Client, recorder events.Recorder, clk clock.Clock, node *state.StateNode,
provisionerMap map[string]*v1alpha5.Provisioner, provisionerToInstanceTypes map[string]map[string]*cloudprovider.InstanceType) (*Candidate, error) {
if node.Node == nil || node.Machine == nil {
return nil, fmt.Errorf("state node doesn't contain both a node and a machine")
}
// check whether the node has all the labels we need
for _, label := range []string{
v1alpha5.LabelCapacityType,
v1.LabelTopologyZone,
v1alpha5.ProvisionerNameLabelKey,
} {
if _, ok := node.Labels()[label]; !ok {
// This means that we don't own the candidate which means we shouldn't fire an event for it
if label != v1alpha5.ProvisionerNameLabelKey {
recorder.Publish(deprovisioningevents.Blocked(node.Node, node.Machine, fmt.Sprintf("required label %q doesn't exist", label))...)
}
return nil, fmt.Errorf("state node doesn't have required label '%s'", label)
}
}
provisioner := provisionerMap[node.Labels()[v1alpha5.ProvisionerNameLabelKey]]
instanceTypeMap := provisionerToInstanceTypes[node.Labels()[v1alpha5.ProvisionerNameLabelKey]]
// skip any nodes where we can't determine the provisioner
if provisioner == nil || instanceTypeMap == nil {
recorder.Publish(deprovisioningevents.Blocked(node.Node, node.Machine, fmt.Sprintf("owning provisioner %q not found", node.Labels()[v1alpha5.ProvisionerNameLabelKey]))...)
return nil, fmt.Errorf("provisioner '%s' can't be resolved for state node", node.Labels()[v1alpha5.ProvisionerNameLabelKey])
}
instanceType := instanceTypeMap[node.Labels()[v1.LabelInstanceTypeStable]]
	// skip any nodes whose instance type we can't determine
if instanceType == nil {
recorder.Publish(deprovisioningevents.Blocked(node.Node, node.Machine, fmt.Sprintf("instance type %q not found", node.Labels()[v1.LabelInstanceTypeStable]))...)
return nil, fmt.Errorf("instance type '%s' can't be resolved", node.Labels()[v1.LabelInstanceTypeStable])
}
// skip any nodes that are already marked for deletion and being handled
if node.MarkedForDeletion() {
recorder.Publish(deprovisioningevents.Blocked(node.Node, node.Machine, "machine is marked for deletion")...)
return nil, fmt.Errorf("state node is marked for deletion")
}
// skip nodes that aren't initialized
// This also means that the real Node doesn't exist for it
if !node.Initialized() {
recorder.Publish(deprovisioningevents.Blocked(node.Node, node.Machine, "machine is not initialized")...)
return nil, fmt.Errorf("state node isn't initialized")
}
// skip the node if it is nominated by a recent provisioning pass to be the target of a pending pod.
if node.Nominated() {
recorder.Publish(deprovisioningevents.Blocked(node.Node, node.Machine, "machine is nominated")...)
return nil, fmt.Errorf("state node is nominated")
}
pods, err := node.Pods(ctx, kubeClient)
if err != nil {
logging.FromContext(ctx).Errorf("Determining node pods, %s", err)
return nil, fmt.Errorf("getting pods from state node, %w", err)
}
cn := &Candidate{
StateNode: node.DeepCopy(),
instanceType: instanceType,
provisioner: provisioner,
capacityType: node.Labels()[v1alpha5.LabelCapacityType],
zone: node.Labels()[v1.LabelTopologyZone],
pods: pods,
}
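	// Scale the pod-based disruption cost by the fraction of lifetime remaining so that nodes nearing expiration are
	// cheaper to disrupt.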
cn.disruptionCost = disruptionCost(ctx, pods) * cn.lifetimeRemaining(clk)
return cn, nil
}
// lifetimeRemaining calculates the fraction of node lifetime remaining in the range [0.0, 1.0]. If the TTLSecondsUntilExpired
// is non-zero, we use it to scale down the disruption costs of nodes that are going to expire. Just after creation, the
// disruption cost is highest, and it approaches zero as the node ages towards its expiration time.
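// For example, with TTLSecondsUntilExpired of 86400 (one day) and a node that is 64800 seconds old, the remaining
// fraction is (86400-64800)/86400 = 0.25, so the node's disruption cost is scaled to a quarter of its base value.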
func (c *Candidate) lifetimeRemaining(clock clock.Clock) float64 {
remaining := 1.0
if c.provisioner.Spec.TTLSecondsUntilExpired != nil {
ageInSeconds := clock.Since(c.Node.CreationTimestamp.Time).Seconds()
totalLifetimeSeconds := float64(*c.provisioner.Spec.TTLSecondsUntilExpired)
lifetimeRemainingSeconds := totalLifetimeSeconds - ageInSeconds
remaining = clamp(0.0, lifetimeRemainingSeconds/totalLifetimeSeconds, 1.0)
}
return remaining
}
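// Command describes a deprovisioning action: the candidate nodes to terminate and any replacement machines to launch
// in their place.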
type Command struct {
candidates []*Candidate
replacements []*scheduling.Machine
}
type Action string
var (
NoOpAction Action = "no-op"
ReplaceAction Action = "replace"
DeleteAction Action = "delete"
)
func (o Command) Action() Action {
switch {
case len(o.candidates) > 0 && len(o.replacements) > 0:
return ReplaceAction
case len(o.candidates) > 0 && len(o.replacements) == 0:
return DeleteAction
default:
return NoOpAction
}
}
func (o Command) String() string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%s, terminating %d machines ", o.Action(), len(o.candidates))
for i, old := range o.candidates {
if i != 0 {
fmt.Fprint(&buf, ", ")
}
fmt.Fprintf(&buf, "%s", old.Name())
fmt.Fprintf(&buf, "/%s", old.instanceType.Name)
fmt.Fprintf(&buf, "/%s", old.capacityType)
}
if len(o.replacements) == 0 {
return buf.String()
}
odMachines := 0
spotMachines := 0
for _, machine := range o.replacements {
ct := machine.Requirements.Get(v1alpha5.LabelCapacityType)
if ct.Has(v1alpha5.CapacityTypeOnDemand) {
odMachines++
}
if ct.Has(v1alpha5.CapacityTypeSpot) {
spotMachines++
}
}
// Print list of instance types for the first replacements.
if len(o.replacements) > 1 {
fmt.Fprintf(&buf, " and replacing with %d spot and %d on-demand machines from types %s",
spotMachines, odMachines,
scheduling.InstanceTypeList(o.replacements[0].InstanceTypeOptions))
return buf.String()
}
ct := o.replacements[0].Requirements.Get(v1alpha5.LabelCapacityType)
machineDesc := "machine"
if ct.Len() == 1 {
machineDesc = fmt.Sprintf("%s machine", ct.Any())
}
fmt.Fprintf(&buf, " and replacing with %s from types %s",
machineDesc,
scheduling.InstanceTypeList(o.replacements[0].InstanceTypeOptions))
return buf.String()
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deprovisioning
import (
"context"
"errors"
"fmt"
"sync"
"time"
"k8s.io/utils/clock"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
)
// Validation is used to perform validation on a consolidation command. It makes an assumption that when re-used, all
// of the commands passed to IsValid were constructed based off of the same consolidation state. This allows it to
// skip the validation TTL for all but the first command.
type Validation struct {
validationPeriod time.Duration
start time.Time
clock clock.Clock
cluster *state.Cluster
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
provisioner *provisioning.Provisioner
once sync.Once
recorder events.Recorder
// validationCandidates are the cached validation candidates. We capture these when validating the first command and reuse them for
// validating subsequent commands.
validationCandidates []*Candidate
}
func NewValidation(validationPeriod time.Duration, clk clock.Clock, cluster *state.Cluster, kubeClient client.Client, provisioner *provisioning.Provisioner, cp cloudprovider.CloudProvider, recorder events.Recorder) *Validation {
return &Validation{
validationPeriod: validationPeriod,
clock: clk,
cluster: cluster,
kubeClient: kubeClient,
provisioner: provisioner,
cloudProvider: cp,
recorder: recorder,
}
}
func (v *Validation) IsValid(ctx context.Context, cmd Command) (bool, error) {
var err error
v.once.Do(func() {
v.start = v.clock.Now()
})
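	// Wait out whatever remains of the validation period, measured from the first command validated against this
	// consolidation state; subsequent commands reuse the same start time and typically skip the wait.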
waitDuration := v.validationPeriod - v.clock.Since(v.start)
if waitDuration > 0 {
select {
case <-ctx.Done():
return false, errors.New("context canceled")
case <-v.clock.After(waitDuration):
}
}
if len(v.validationCandidates) == 0 {
v.validationCandidates, err = GetCandidates(ctx, v.cluster, v.kubeClient, v.recorder, v.clock, v.cloudProvider, v.ShouldDeprovision)
if err != nil {
return false, fmt.Errorf("constructing validation candidates, %w", err)
}
}
nodes, err := filterCandidates(ctx, v.kubeClient, v.recorder, cmd.candidates)
if err != nil {
return false, fmt.Errorf("filtering candidates, %w", err)
}
// If we filtered out any candidates, return false as some nodes in the consolidation decision have changed.
if len(nodes) != len(cmd.candidates) {
return false, nil
}
	// if a candidate we are about to delete is the target of a currently pending pod, wait for that to settle
	// before continuing consolidation
for _, n := range cmd.candidates {
if v.cluster.IsNodeNominated(n.Name()) {
return false, nil
}
}
isValid, err := v.ValidateCommand(ctx, cmd, v.validationCandidates)
if err != nil {
return false, fmt.Errorf("validating command, %w", err)
}
return isValid, nil
}
// ShouldDeprovision is a predicate used to filter deprovisionable nodes
func (v *Validation) ShouldDeprovision(_ context.Context, c *Candidate) bool {
if val, ok := c.Annotations()[v1alpha5.DoNotConsolidateNodeAnnotationKey]; ok {
return val != "true"
}
return c.provisioner != nil && c.provisioner.Spec.Consolidation != nil && ptr.BoolValue(c.provisioner.Spec.Consolidation.Enabled)
}
// ValidateCommand validates a command for a deprovisioner
func (v *Validation) ValidateCommand(ctx context.Context, cmd Command, candidates []*Candidate) (bool, error) {
// map from candidates we are about to remove back into candidates with cluster state
candidates = mapCandidates(cmd.candidates, candidates)
	// None of the chosen candidates are valid for execution, so retry
if len(candidates) == 0 {
return false, nil
}
results, err := simulateScheduling(ctx, v.kubeClient, v.cluster, v.provisioner, candidates...)
if err != nil {
		return false, fmt.Errorf("simulating scheduling, %w", err)
}
if !results.AllPodsScheduled() {
return false, nil
}
// We want to ensure that the re-simulated scheduling using the current cluster state produces the same result.
// There are three possible options for the number of new candidates that we need to handle:
// len(newMachines) == 0, as long as we weren't expecting a new machine, this is valid
// len(newMachines) > 1, something in the cluster changed so that the candidates we were going to delete can no longer
// be deleted without producing more than one machine
// len(newMachines) == 1, as long as the machine looks like what we were expecting, this is valid
if len(results.NewMachines) == 0 {
if len(cmd.replacements) == 0 {
// scheduling produced zero new machines and we weren't expecting any, so this is valid.
return true, nil
}
// if it produced no new machines, but we were expecting one we should re-simulate as there is likely a better
// consolidation option now
return false, nil
}
// we need more than one replacement machine which is never valid currently (all of our node replacement is m->1, never m->n)
if len(results.NewMachines) > 1 {
return false, nil
}
// we now know that scheduling simulation wants to create one new machine
if len(cmd.replacements) == 0 {
// but we weren't expecting any new nodes, so this is invalid
return false, nil
}
// We know that the scheduling simulation wants to create a new machine and that the command we are verifying wants
// to create a new machine. The scheduling simulation doesn't apply any filtering to instance types, so it may include
// instance types that we don't want to launch which were filtered out when the lifecycleCommand was created. To
// check if our lifecycleCommand is valid, we just want to ensure that the list of instance types we are considering
// creating are a subset of what scheduling says we should create. We check for a subset since the scheduling
// simulation here does no price filtering, so it will include more expensive types.
//
	// This is necessary since consolidation only wants cheaper machines. Suppose consolidation determined we should delete
	// a 4xlarge and replace it with a 2xlarge, but things have changed and the scheduling simulation we just performed
	// now says that we need to launch a 4xlarge. It's still launching the correct number of machines, but the replacement
	// is just as expensive or possibly more expensive, so we shouldn't validate.
if !instanceTypesAreSubset(cmd.replacements[0].InstanceTypeOptions, results.NewMachines[0].InstanceTypeOptions) {
return false, nil
}
// Now we know:
// - current scheduling simulation says to create a new machine with types T = {T_0, T_1, ..., T_n}
// - our lifecycle command says to create a machine with types {U_0, U_1, ..., U_n} where U is a subset of T
return true, nil
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/events"
)
func Launching(machine *v1alpha5.Machine, reason string) events.Event {
return events.Event{
InvolvedObject: machine,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningLaunching",
Message: fmt.Sprintf("Launching machine for %s", reason),
DedupeValues: []string{machine.Name, reason},
}
}
func WaitingOnReadiness(machine *v1alpha5.Machine) events.Event {
return events.Event{
InvolvedObject: machine,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningWaitingReadiness",
Message: "Waiting on readiness to continue deprovisioning",
DedupeValues: []string{machine.Name},
}
}
func WaitingOnDeletion(machine *v1alpha5.Machine) events.Event {
return events.Event{
InvolvedObject: machine,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningWaitingDeletion",
Message: "Waiting on deletion to continue deprovisioning",
DedupeValues: []string{machine.Name},
}
}
func Terminating(node *v1.Node, machine *v1alpha5.Machine, reason string) []events.Event {
return []events.Event{
{
InvolvedObject: node,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningTerminating",
Message: fmt.Sprintf("Deprovisioning node via %s", reason),
DedupeValues: []string{node.Name, reason},
},
{
InvolvedObject: machine,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningTerminating",
Message: fmt.Sprintf("Deprovisioning machine via %s", reason),
DedupeValues: []string{machine.Name, reason},
},
}
}
func Unconsolidatable(node *v1.Node, machine *v1alpha5.Machine, reason string) []events.Event {
return []events.Event{
{
InvolvedObject: node,
Type: v1.EventTypeNormal,
Reason: "Unconsolidatable",
Message: reason,
DedupeValues: []string{node.Name},
DedupeTimeout: time.Minute * 15,
},
{
InvolvedObject: machine,
Type: v1.EventTypeNormal,
Reason: "Unconsolidatable",
Message: reason,
DedupeValues: []string{machine.Name},
DedupeTimeout: time.Minute * 15,
},
}
}
func Blocked(node *v1.Node, machine *v1alpha5.Machine, reason string) []events.Event {
return []events.Event{
{
InvolvedObject: node,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningBlocked",
Message: fmt.Sprintf("Cannot deprovision node due to %s", reason),
DedupeValues: []string{node.Name, reason},
},
{
InvolvedObject: machine,
Type: v1.EventTypeNormal,
Reason: "DeprovisioningBlocked",
Message: fmt.Sprintf("Cannot deprovision machine due to %s", reason),
DedupeValues: []string{machine.Name, reason},
},
}
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollection
import (
"context"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
"go.uber.org/multierr"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/metrics"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/utils/sets"
)
type Controller struct {
clock clock.Clock
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
}
func NewController(c clock.Clock, kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) corecontroller.Controller {
return &Controller{
clock: c,
kubeClient: kubeClient,
cloudProvider: cloudProvider,
}
}
func (c *Controller) Name() string {
return "machine.garbagecollection"
}
func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
machineList := &v1alpha5.MachineList{}
if err := c.kubeClient.List(ctx, machineList); err != nil {
return reconcile.Result{}, err
}
cloudProviderMachines, err := c.cloudProvider.List(ctx)
if err != nil {
return reconcile.Result{}, err
}
cloudProviderMachines = lo.Filter(cloudProviderMachines, func(m *v1alpha5.Machine, _ int) bool {
return m.DeletionTimestamp.IsZero()
})
cloudProviderProviderIDs := sets.New[string](lo.Map(cloudProviderMachines, func(m *v1alpha5.Machine, _ int) string {
return m.Status.ProviderID
})...)
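	// A machine is considered leaked when it was launched more than 10 seconds ago but its provider ID no longer
	// appears in the cloud provider's list of active machines; those machines are deleted below.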
machines := lo.Filter(lo.ToSlicePtr(machineList.Items), func(m *v1alpha5.Machine, _ int) bool {
return m.StatusConditions().GetCondition(v1alpha5.MachineLaunched).IsTrue() &&
c.clock.Since(m.StatusConditions().GetCondition(v1alpha5.MachineLaunched).LastTransitionTime.Inner.Time) > time.Second*10 &&
!cloudProviderProviderIDs.Has(m.Status.ProviderID)
})
errs := make([]error, len(machines))
workqueue.ParallelizeUntil(ctx, 20, len(machines), func(i int) {
if err := c.kubeClient.Delete(ctx, machines[i]); err != nil {
errs[i] = client.IgnoreNotFound(err)
return
}
logging.FromContext(ctx).
With("provisioner", machines[i].Labels[v1alpha5.ProvisionerNameLabelKey], "machine", machines[i].Name, "provider-id", machines[i].Status.ProviderID).
Debugf("garbage collecting machine with no cloudprovider representation")
metrics.MachinesTerminatedCounter.With(prometheus.Labels{
metrics.ReasonLabel: "garbage_collected",
metrics.ProvisionerLabel: machines[i].Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
})
return reconcile.Result{RequeueAfter: time.Minute * 2}, multierr.Combine(errs...)
}
func (c *Controller) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.NewSingletonManagedBy(m)
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollection_test
import (
"context"
"testing"
"time"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clock "k8s.io/utils/clock/testing"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
machinegarbagecollection "github.com/aws/karpenter-core/pkg/controllers/machine/garbagecollection"
machinelifecycle "github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var machineController controller.Controller
var garbageCollectionController controller.Controller
var env *test.Environment
var fakeClock *clock.FakeClock
var cloudProvider *fake.CloudProvider
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Machine")
}
var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...), test.WithFieldIndexers(func(c cache.Cache) error {
return c.IndexField(ctx, &v1.Node{}, "spec.providerID", func(obj client.Object) []string {
return []string{obj.(*v1.Node).Spec.ProviderID}
})
}))
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
garbageCollectionController = machinegarbagecollection.NewController(fakeClock, env.Client, cloudProvider)
machineController = machinelifecycle.NewController(fakeClock, env.Client, cloudProvider)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = AfterEach(func() {
fakeClock.SetTime(time.Now())
ExpectCleanedUp(ctx, env.Client)
cloudProvider.Reset()
})
var _ = Describe("GarbageCollection", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = test.Provisioner()
})
It("should delete the Machine when the Node never appears and the instance is gone", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
// Step forward to move past the cache eventual consistency timeout
fakeClock.SetTime(time.Now().Add(time.Second * 20))
// Delete the machine from the cloudprovider
Expect(cloudProvider.Delete(ctx, machine)).To(Succeed())
// Expect the Machine to be removed now that the Instance is gone
ExpectReconcileSucceeded(ctx, garbageCollectionController, client.ObjectKey{})
ExpectFinalizersRemoved(ctx, env.Client, machine)
ExpectNotFound(ctx, env.Client, machine)
})
It("should delete many Machines when the Node never appears and the instance is gone", func() {
var machines []*v1alpha5.Machine
for i := 0; i < 100; i++ {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
machines = append(machines, machine)
}
// Step forward to move past the cache eventual consistency timeout
fakeClock.SetTime(time.Now().Add(time.Second * 20))
for _, machine := range machines {
// Delete the machine from the cloudprovider
Expect(cloudProvider.Delete(ctx, machine)).To(Succeed())
}
// Expect the Machines to be removed now that the Instance is gone
ExpectReconcileSucceeded(ctx, garbageCollectionController, client.ObjectKey{})
for _, machine := range machines {
ExpectFinalizersRemoved(ctx, env.Client, machine)
}
ExpectNotFound(ctx, env.Client, lo.Map(machines, func(m *v1alpha5.Machine, _ int) client.Object { return m })...)
})
It("shouldn't delete the Machine when the Node isn't there but the instance is there", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
// Step forward to move past the cache eventual consistency timeout
fakeClock.SetTime(time.Now().Add(time.Second * 20))
// Reconcile the Machine. It should not be deleted by this flow since it has never been registered
ExpectReconcileSucceeded(ctx, garbageCollectionController, client.ObjectKey{})
ExpectFinalizersRemoved(ctx, env.Client, machine)
ExpectExists(ctx, env.Client, machine)
})
})
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"time"
"github.com/patrickmn/go-cache"
"go.uber.org/multierr"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
"github.com/aws/karpenter-core/pkg/utils/result"
)
type machineReconciler interface {
Reconcile(context.Context, *v1alpha5.Machine) (reconcile.Result, error)
}
var _ corecontroller.TypedController[*v1alpha5.Machine] = (*Controller)(nil)
// Controller is a Machine Lifecycle controller that manages the lifecycle of the machine up until its termination.
// The controller is responsible for ensuring that new Machines get launched, that they properly register with the
// cluster as nodes, that they are properly initialized, and that Machines that do not have matching nodes after some
// liveness TTL are removed.
type Controller struct {
kubeClient client.Client
launch *Launch
registration *Registration
initialization *Initialization
liveness *Liveness
}
func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) corecontroller.Controller {
return corecontroller.Typed[*v1alpha5.Machine](kubeClient, &Controller{
kubeClient: kubeClient,
launch: &Launch{kubeClient: kubeClient, cloudProvider: cloudProvider, cache: cache.New(time.Minute, time.Second*10)},
registration: &Registration{kubeClient: kubeClient},
initialization: &Initialization{kubeClient: kubeClient},
liveness: &Liveness{clock: clk, kubeClient: kubeClient},
})
}
func (*Controller) Name() string {
return "machine.lifecycle"
}
func (c *Controller) Reconcile(ctx context.Context, machine *v1alpha5.Machine) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provisioner", machine.Labels[v1alpha5.ProvisionerNameLabelKey]))
if !machine.DeletionTimestamp.IsZero() {
return reconcile.Result{}, nil
}
// Add the finalizer immediately since we shouldn't launch if we don't yet have the finalizer.
// Otherwise, we could leak resources
stored := machine.DeepCopy()
controllerutil.AddFinalizer(machine, v1alpha5.TerminationFinalizer)
if !equality.Semantic.DeepEqual(machine, stored) {
if err := c.kubeClient.Patch(ctx, machine, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
}
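	// Snapshot the machine again so that mutations made by the sub-reconcilers below can be patched as a diff against
	// this state.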
stored = machine.DeepCopy()
var results []reconcile.Result
var errs error
for _, reconciler := range []machineReconciler{
c.launch,
c.registration,
c.initialization,
c.liveness,
} {
res, err := reconciler.Reconcile(ctx, machine)
errs = multierr.Append(errs, err)
results = append(results, res)
}
if !equality.Semantic.DeepEqual(stored, machine) {
statusCopy := machine.DeepCopy()
if err := c.kubeClient.Patch(ctx, machine, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(multierr.Append(errs, err))
}
if err := c.kubeClient.Status().Patch(ctx, statusCopy, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(multierr.Append(errs, err))
}
// We sleep here after a patch operation since we want to ensure that we are able to read our own writes
// so that we avoid duplicating metrics and log lines due to quick re-queues from our node watcher
// USE CAUTION when determining whether to increase this timeout or remove this line
time.Sleep(time.Second)
}
return result.Min(results...), errs
}
func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1alpha5.Machine{}, builder.WithPredicates(
predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool { return true },
UpdateFunc: func(e event.UpdateEvent) bool { return false },
DeleteFunc: func(e event.DeleteEvent) bool { return false },
},
)).
Watches(
&source.Kind{Type: &v1.Node{}},
machineutil.NodeEventHandler(ctx, c.kubeClient),
).
WithOptions(controller.Options{
RateLimiter: workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(time.Second, time.Minute),
// 10 qps, 100 bucket size
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
),
MaxConcurrentReconciles: 1000, // higher concurrency limit since we want fast reaction to node syncing and launch
}))
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/scheduling"
machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
nodeutil "github.com/aws/karpenter-core/pkg/utils/node"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
type Initialization struct {
kubeClient client.Client
}
// Reconcile checks for initialization based on if:
// a) its current status is set to Ready
// b) all the startup taints have been removed from the node
// c) all extended resources have been registered
// This method handles both nil provisioners and nodes without extended resources gracefully.
func (i *Initialization) Reconcile(ctx context.Context, machine *v1alpha5.Machine) (reconcile.Result, error) {
if machine.StatusConditions().GetCondition(v1alpha5.MachineInitialized).IsTrue() {
return reconcile.Result{}, nil
}
if !machine.StatusConditions().GetCondition(v1alpha5.MachineLaunched).IsTrue() {
machine.StatusConditions().MarkFalse(v1alpha5.MachineInitialized, "MachineNotLaunched", "Machine is not launched")
return reconcile.Result{}, nil
}
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", machine.Status.ProviderID))
node, err := machineutil.NodeForMachine(ctx, i.kubeClient, machine)
if err != nil {
machine.StatusConditions().MarkFalse(v1alpha5.MachineInitialized, "NodeNotFound", "Node not registered with cluster")
return reconcile.Result{}, nil //nolint:nilerr
}
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", node.Name))
if nodeutil.GetCondition(node, v1.NodeReady).Status != v1.ConditionTrue {
machine.StatusConditions().MarkFalse(v1alpha5.MachineInitialized, "NodeNotReady", "Node status is NotReady")
return reconcile.Result{}, nil
}
if taint, ok := StartupTaintsRemoved(node, machine); !ok {
machine.StatusConditions().MarkFalse(v1alpha5.MachineInitialized, "StartupTaintsExist", "StartupTaint %q still exists", formatTaint(taint))
return reconcile.Result{}, nil
}
if taint, ok := KnownEphemeralTaintsRemoved(node); !ok {
machine.StatusConditions().MarkFalse(v1alpha5.MachineInitialized, "KnownEphemeralTaintsExist", "KnownEphemeralTaint %q still exists", formatTaint(taint))
return reconcile.Result{}, nil
}
if name, ok := RequestedResourcesRegistered(node, machine); !ok {
machine.StatusConditions().MarkFalse(v1alpha5.MachineInitialized, "ResourceNotRegistered", "Resource %q was requested but not registered", name)
return reconcile.Result{}, nil
}
stored := node.DeepCopy()
node.Labels = lo.Assign(node.Labels, map[string]string{v1alpha5.LabelNodeInitialized: "true"})
if !equality.Semantic.DeepEqual(stored, node) {
if err = i.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, err
}
}
logging.FromContext(ctx).Debugf("initialized machine")
machine.StatusConditions().MarkTrue(v1alpha5.MachineInitialized)
metrics.MachinesInitializedCounter.With(prometheus.Labels{
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
return reconcile.Result{}, nil
}
// KnownEphemeralTaintsRemoved validates whether all the ephemeral taints are removed
func KnownEphemeralTaintsRemoved(node *v1.Node) (*v1.Taint, bool) {
for _, knownTaint := range scheduling.KnownEphemeralTaints {
// if the node still has a known ephemeral taint applied, it's not ready
for i := range node.Spec.Taints {
if knownTaint.MatchTaint(&node.Spec.Taints[i]) {
return &node.Spec.Taints[i], false
}
}
}
return nil, true
}
// StartupTaintsRemoved returns true if there are no startup taints registered for the machine, or if all startup
// taints have been removed from the node
func StartupTaintsRemoved(node *v1.Node, machine *v1alpha5.Machine) (*v1.Taint, bool) {
if machine != nil {
for _, startupTaint := range machine.Spec.StartupTaints {
for i := range node.Spec.Taints {
// if the node still has a startup taint applied, it's not ready
if startupTaint.MatchTaint(&node.Spec.Taints[i]) {
return &node.Spec.Taints[i], false
}
}
}
}
return nil, true
}
// RequestedResourcesRegistered returns true if there are no extended resources on the node, or they have all been
// registered by device plugins
func RequestedResourcesRegistered(node *v1.Node, machine *v1alpha5.Machine) (v1.ResourceName, bool) {
for resourceName, quantity := range machine.Spec.Resources.Requests {
if quantity.IsZero() {
continue
}
// kubelet will zero out both the capacity and allocatable for an extended resource on startup, so if the
// Machine requested the resource but it's still zeroed on the node, then the device plugin hasn't
// registered it yet.
// We wait on allocatable since that is the value used for scheduling
if resources.IsZero(node.Status.Allocatable[resourceName]) {
return resourceName, false
}
}
return "", true
}
func formatTaint(taint *v1.Taint) string {
if taint == nil {
return "<nil>"
}
if taint.Value == "" {
return fmt.Sprintf("%s:%s", taint.Key, taint.Effect)
}
return fmt.Sprintf("%s=%s:%s", taint.Key, taint.Value, taint.Effect)
}
| 148 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle_test
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudproviderapi "k8s.io/cloud-provider/api"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var _ = Describe("Initialization", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = test.Provisioner()
})
It("should consider the Machine initialized when all initialization conditions are met", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesReady(ctx, env.Client, node) // Remove the not-ready taint
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
node = ExpectExists(ctx, env.Client, node)
node.Status.Capacity = v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
}
node.Status.Allocatable = v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
}
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionTrue))
})
It("should add the initialization label to the node when the Machine is initialized", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesReady(ctx, env.Client, node) // Remove the not-ready taint
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
Expect(node.Labels).To(HaveKeyWithValue(v1alpha5.LabelNodeInitialized, "true"))
})
It("should not consider the Node to be initialized when the status of the Node is NotReady", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
ReadyStatus: v1.ConditionFalse,
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
})
It("should not consider the Node to be initialized when all requested resources aren't registered", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
// Update the machine status to mock the instance type having an extended resource
machine.Status.Capacity[fake.ResourceGPUVendorA] = resource.MustParse("2")
machine.Status.Allocatable[fake.ResourceGPUVendorA] = resource.MustParse("2")
ExpectApplied(ctx, env.Client, machine)
// Extended resource hasn't been registered by the device plugin daemonset yet
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesReady(ctx, env.Client, node) // Remove the not-ready taint
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
})
It("should consider the node to be initialized once all the resources are registered", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
// Update the machine status to mock the instance type having an extended resource
machine.Status.Capacity[fake.ResourceGPUVendorA] = resource.MustParse("2")
machine.Status.Allocatable[fake.ResourceGPUVendorA] = resource.MustParse("2")
ExpectApplied(ctx, env.Client, machine)
// Extended resource hasn't been registered by the device plugin daemonset yet
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesReady(ctx, env.Client, node) // Remove the not-ready taint
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
// Node now registers the resource
node = ExpectExists(ctx, env.Client, node)
node.Status.Capacity[fake.ResourceGPUVendorA] = resource.MustParse("2")
node.Status.Allocatable[fake.ResourceGPUVendorA] = resource.MustParse("2")
ExpectApplied(ctx, env.Client, node)
// Reconcile the machine and the Machine/Node should now be initialized
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionTrue))
})
It("should not consider the Node to be initialized when all startupTaints aren't removed", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesReady(ctx, env.Client, node) // Remove the not-ready taint
// Should add the startup taints to the node
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
Expect(node.Spec.Taints).To(ContainElements(
v1.Taint{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
v1.Taint{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
))
// Shouldn't consider the node ready since the startup taints still exist
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
})
It("should consider the Node to be initialized once the startupTaints are removed", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesReady(ctx, env.Client, node) // Remove the not-ready taint
// Shouldn't consider the node ready since the startup taints still exist
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
node = ExpectExists(ctx, env.Client, node)
node.Spec.Taints = []v1.Taint{}
ExpectApplied(ctx, env.Client, node)
// Machine should now be initialized since all startup taints are removed
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionTrue))
})
It("should not consider the Node to be initialized when all ephemeralTaints aren't removed", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Taints: []v1.Taint{
{
Key: v1.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: v1.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Effect: v1.TaintEffectNoSchedule,
Value: "true",
},
},
})
ExpectApplied(ctx, env.Client, node)
// Shouldn't consider the node ready since the ephemeral taints still exist
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
})
It("should consider the Node to be initialized once the ephemeralTaints are removed", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourcePods: resource.MustParse("110"),
},
Taints: []v1.Taint{
{
Key: v1.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: v1.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Effect: v1.TaintEffectNoSchedule,
Value: "true",
},
},
})
ExpectApplied(ctx, env.Client, node)
// Shouldn't consider the node ready since the ephemeral taints still exist
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionFalse))
node = ExpectExists(ctx, env.Client, node)
node.Spec.Taints = []v1.Taint{}
ExpectApplied(ctx, env.Client, node)
// Machine should now be initialized since all ephemeral taints are removed
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineInitialized).Status).To(Equal(v1.ConditionTrue))
})
})
| 572 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"fmt"
"github.com/patrickmn/go-cache"
"github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/scheduling"
)
type Launch struct {
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
cache *cache.Cache // caches created machines to smooth over eventual consistency in the client-go cache
}
func (l *Launch) Reconcile(ctx context.Context, machine *v1alpha5.Machine) (reconcile.Result, error) {
if machine.StatusConditions().GetCondition(v1alpha5.MachineLaunched).IsTrue() {
return reconcile.Result{}, nil
}
var err error
var created *v1alpha5.Machine
// One of the following scenarios can happen with a Machine that isn't marked as launched:
// 1. It was already launched by the CloudProvider but the client-go cache wasn't updated quickly enough or
// patching failed on the status. In this case, we use the in-memory cached value for the created machine.
// 2. It is a "linked" machine, which implies that the CloudProvider Machine already exists for the Machine CR, but we
// need to grab info from the CloudProvider to get details on the machine.
// 3. It is a standard machine launch where we should call CloudProvider Create() and fill in details of the launched
// machine into the Machine CR.
if ret, ok := l.cache.Get(string(machine.UID)); ok {
created = ret.(*v1alpha5.Machine)
} else if _, ok := machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]; ok {
created, err = l.linkMachine(ctx, machine)
} else {
created, err = l.launchMachine(ctx, machine)
}
// Either the machine launch/linking failed or the machine was deleted due to InsufficientCapacity/NotFound
if err != nil || created == nil {
return reconcile.Result{}, err
}
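// Cache the launch result by the Machine's UID so that a follow-up reconcile can reuse it if the status
// update isn't visible yet (scenario 1 above)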
l.cache.SetDefault(string(machine.UID), created)
PopulateMachineDetails(machine, created)
machine.StatusConditions().MarkTrue(v1alpha5.MachineLaunched)
metrics.MachinesLaunchedCounter.With(prometheus.Labels{
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
return reconcile.Result{}, nil
}
func (l *Launch) linkMachine(ctx context.Context, machine *v1alpha5.Machine) (*v1alpha5.Machine, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]))
created, err := l.cloudProvider.Get(ctx, machine.Annotations[v1alpha5.MachineLinkedAnnotationKey])
if err != nil {
if !cloudprovider.IsMachineNotFoundError(err) {
machine.StatusConditions().MarkFalse(v1alpha5.MachineLaunched, "LinkFailed", truncateMessage(err.Error()))
return nil, fmt.Errorf("linking machine, %w", err)
}
if err = l.kubeClient.Delete(ctx, machine); err != nil {
return nil, client.IgnoreNotFound(err)
}
logging.FromContext(ctx).Debugf("garbage collected machine with no cloudprovider representation")
metrics.MachinesTerminatedCounter.With(prometheus.Labels{
metrics.ReasonLabel: "garbage_collected",
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
return nil, nil
}
logging.FromContext(ctx).With(
"provider-id", created.Status.ProviderID,
"instance-type", created.Labels[v1.LabelInstanceTypeStable],
"zone", created.Labels[v1.LabelTopologyZone],
"capacity-type", created.Labels[v1alpha5.LabelCapacityType],
"allocatable", created.Status.Allocatable).Infof("linked machine")
return created, nil
}
func (l *Launch) launchMachine(ctx context.Context, machine *v1alpha5.Machine) (*v1alpha5.Machine, error) {
created, err := l.cloudProvider.Create(ctx, machine)
if err != nil {
if !cloudprovider.IsInsufficientCapacityError(err) {
machine.StatusConditions().MarkFalse(v1alpha5.MachineLaunched, "LaunchFailed", truncateMessage(err.Error()))
return nil, fmt.Errorf("creating machine, %w", err)
}
logging.FromContext(ctx).Error(err)
if err = l.kubeClient.Delete(ctx, machine); err != nil {
return nil, client.IgnoreNotFound(err)
}
metrics.MachinesTerminatedCounter.With(prometheus.Labels{
metrics.ReasonLabel: "insufficient_capacity",
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
return nil, nil
}
logging.FromContext(ctx).With(
"provider-id", created.Status.ProviderID,
"instance-type", created.Labels[v1.LabelInstanceTypeStable],
"zone", created.Labels[v1.LabelTopologyZone],
"capacity-type", created.Labels[v1alpha5.LabelCapacityType],
"allocatable", created.Status.Allocatable).Infof("launched machine")
return created, nil
}
func PopulateMachineDetails(machine, retrieved *v1alpha5.Machine) {
// These are merged in priority order so that user-defined machine labels trump the labels derived from
// single-value requirements, which in turn trump the CloudProvider-resolved labels
machine.Labels = lo.Assign(
retrieved.Labels, // CloudProvider-resolved labels
scheduling.NewNodeSelectorRequirements(machine.Spec.Requirements...).Labels(), // Single-value requirement resolved labels
machine.Labels, // User-defined labels
)
machine.Annotations = lo.Assign(machine.Annotations, retrieved.Annotations)
machine.Status.ProviderID = retrieved.Status.ProviderID
machine.Status.Allocatable = retrieved.Status.Allocatable
machine.Status.Capacity = retrieved.Status.Capacity
}
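// truncateMessage caps long error messages (e.g. cloud provider errors) before they are written into
// status condition messages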
func truncateMessage(msg string) string {
if len(msg) < 300 {
return msg
}
return msg[:300] + "..."
}
| 149 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle_test
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var _ = Describe("Launch", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = test.Provisioner()
})
It("should launch an instance when a new Machine is created", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
Expect(cloudProvider.CreatedMachines).To(HaveLen(1))
_, err := cloudProvider.Get(ctx, machine.Status.ProviderID)
Expect(err).ToNot(HaveOccurred())
})
It("should add the MachineLaunched status condition after creating the Machine", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineLaunched).Status).To(Equal(v1.ConditionTrue))
})
It("should link an instance with the karpenter.sh/linked annotation", func() {
cloudProviderMachine := &v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelInstanceTypeStable: "small-instance-type",
v1.LabelTopologyZone: "test-zone-1a",
v1.LabelTopologyRegion: "test-zone",
v1alpha5.LabelCapacityType: v1alpha5.CapacityTypeSpot,
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("80Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
}
cloudProvider.CreatedMachines[cloudProviderMachine.Status.ProviderID] = cloudProviderMachine
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.MachineLinkedAnnotationKey: cloudProviderMachine.Status.ProviderID,
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(machine.Status.ProviderID).To(Equal(cloudProviderMachine.Status.ProviderID))
ExpectResources(machine.Status.Capacity, cloudProviderMachine.Status.Capacity)
ExpectResources(machine.Status.Allocatable, cloudProviderMachine.Status.Allocatable)
Expect(machine.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "small-instance-type"))
Expect(machine.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1a"))
Expect(machine.Labels).To(HaveKeyWithValue(v1.LabelTopologyRegion, "test-zone"))
Expect(machine.Labels).To(HaveKeyWithValue(v1alpha5.LabelCapacityType, v1alpha5.CapacityTypeSpot))
})
It("should delete the machine if InsufficientCapacity is returned from the cloudprovider", func() {
cloudProvider.NextCreateErr = cloudprovider.NewInsufficientCapacityError(fmt.Errorf("all instance types were unavailable"))
machine := test.Machine()
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectFinalizersRemoved(ctx, env.Client, machine)
ExpectNotFound(ctx, env.Client, machine)
})
})
| 130 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/metrics"
)
type Liveness struct {
clock clock.Clock
kubeClient client.Client
}
// registrationTTL is a heuristic time that we expect the node to register within
// If we don't see the node within this time, then we should delete the machine and try again
const registrationTTL = time.Minute * 15
func (r *Liveness) Reconcile(ctx context.Context, machine *v1alpha5.Machine) (reconcile.Result, error) {
registered := machine.StatusConditions().GetCondition(v1alpha5.MachineRegistered)
if registered.IsTrue() {
return reconcile.Result{}, nil
}
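// GetCondition returns nil if the Registered condition hasn't been populated yet; requeue until it is set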
if registered == nil {
return reconcile.Result{Requeue: true}, nil
}
// If the MachineRegistered statusCondition hasn't gone True during the TTL since we first updated it, we should terminate the machine
if r.clock.Since(registered.LastTransitionTime.Inner.Time) < registrationTTL {
return reconcile.Result{RequeueAfter: registrationTTL - r.clock.Since(registered.LastTransitionTime.Inner.Time)}, nil
}
// Delete the machine if we believe the machine won't register since we haven't seen the node
if err := r.kubeClient.Delete(ctx, machine); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
logging.FromContext(ctx).With("ttl", registrationTTL).Debugf("terminating machine due to registration ttl")
metrics.MachinesTerminatedCounter.With(prometheus.Labels{
metrics.ReasonLabel: "liveness",
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
return reconcile.Result{}, nil
}
| 63 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle_test
import (
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var _ = Describe("Liveness", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = test.Provisioner()
})
It("shouldn't delete the Machine when the node has registered past the registration ttl", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.MachineLinkedNode(machine)
ExpectApplied(ctx, env.Client, node)
// Node and Machine should still exist
fakeClock.Step(time.Minute * 20)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectExists(ctx, env.Client, machine)
ExpectExists(ctx, env.Client, node)
})
It("should delete the Machine when the Node hasn't registered past the registration ttl", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
// If the node hasn't registered in the registration timeframe, then we deprovision the Machine
fakeClock.Step(time.Minute * 20)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectFinalizersRemoved(ctx, env.Client, machine)
ExpectNotFound(ctx, env.Client, machine)
})
It("should delete the Machine when the Machine hasn't launched past the registration ttl", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
},
})
cloudProvider.AllowedCreateCalls = 0 // Don't allow Create() calls to succeed
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileFailed(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
// If the node hasn't registered in the registration timeframe, then we deprovision the Machine
fakeClock.Step(time.Minute * 20)
ExpectReconcileFailed(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectFinalizersRemoved(ctx, env.Client, machine)
ExpectNotFound(ctx, env.Client, machine)
})
})
| 128 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"context"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/logging"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/scheduling"
machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
)
type Registration struct {
kubeClient client.Client
}
func (r *Registration) Reconcile(ctx context.Context, machine *v1alpha5.Machine) (reconcile.Result, error) {
if machine.StatusConditions().GetCondition(v1alpha5.MachineRegistered).IsTrue() {
return reconcile.Result{}, nil
}
if !machine.StatusConditions().GetCondition(v1alpha5.MachineLaunched).IsTrue() {
machine.StatusConditions().MarkFalse(v1alpha5.MachineRegistered, "MachineNotLaunched", "Machine is not launched")
return reconcile.Result{}, nil
}
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", machine.Status.ProviderID))
node, err := machineutil.NodeForMachine(ctx, r.kubeClient, machine)
if err != nil {
if machineutil.IsNodeNotFoundError(err) {
machine.StatusConditions().MarkFalse(v1alpha5.MachineRegistered, "NodeNotFound", "Node not registered with cluster")
return reconcile.Result{}, nil
}
if machineutil.IsDuplicateNodeError(err) {
machine.StatusConditions().MarkFalse(v1alpha5.MachineRegistered, "MultipleNodesFound", "Invariant violated, machine matched multiple nodes")
return reconcile.Result{}, nil
}
return reconcile.Result{}, fmt.Errorf("getting node for machine, %w", err)
}
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", node.Name))
if err = r.syncNode(ctx, machine, node); err != nil {
return reconcile.Result{}, fmt.Errorf("syncing node, %w", err)
}
logging.FromContext(ctx).Debugf("registered machine")
machine.StatusConditions().MarkTrue(v1alpha5.MachineRegistered)
machine.Status.NodeName = node.Name
metrics.MachinesRegisteredCounter.With(prometheus.Labels{
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
// If the machine is linked, then the node already existed so we don't mark it as created
if _, ok := machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]; !ok {
metrics.NodesCreatedCounter.With(prometheus.Labels{
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
}
return reconcile.Result{}, nil
}
func (r *Registration) syncNode(ctx context.Context, machine *v1alpha5.Machine, node *v1.Node) error {
stored := node.DeepCopy()
controllerutil.AddFinalizer(node, v1alpha5.TerminationFinalizer)
// Remove any provisioner owner references since we own them
node.OwnerReferences = lo.Reject(node.OwnerReferences, func(o metav1.OwnerReference, _ int) bool {
return o.Kind == "Provisioner"
})
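// Add the Machine as an owner of the Node so that the Node's lifecycle is tied to the Machine's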
node.OwnerReferences = append(node.OwnerReferences, metav1.OwnerReference{
APIVersion: v1alpha5.SchemeGroupVersion.String(),
Kind: "Machine",
Name: machine.Name,
UID: machine.UID,
BlockOwnerDeletion: ptr.Bool(true),
})
// If the machine isn't registered as linked, then sync it
// This prevents us from messing with nodes that already exist and are scheduled
if _, ok := machine.Annotations[v1alpha5.MachineLinkedAnnotationKey]; !ok {
node.Labels = lo.Assign(node.Labels, machine.Labels)
node.Annotations = lo.Assign(node.Annotations, machine.Annotations)
// Sync all taints inside of the Machine into the Node's taints
node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(machine.Spec.Taints)
node.Spec.Taints = scheduling.Taints(node.Spec.Taints).Merge(machine.Spec.StartupTaints)
}
if !equality.Semantic.DeepEqual(stored, node) {
if err := r.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil {
return fmt.Errorf("syncing node labels, %w", err)
}
}
return nil
}
| 115 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle_test
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var _ = Describe("Registration", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = test.Provisioner()
})
It("should match the Machine to the Node when the Node comes online", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(ExpectStatusConditionExists(machine, v1alpha5.MachineRegistered).Status).To(Equal(v1.ConditionTrue))
Expect(machine.Status.NodeName).To(Equal(node.Name))
})
It("should add the owner reference to the Node when the Node comes online", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
ExpectOwnerReferenceExists(node, machine)
})
It("should sync the labels to the Node when the Node comes online", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
"custom-label": "custom-value",
"other-custom-label": "other-custom-value",
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(machine.Labels).To(HaveKeyWithValue("custom-label", "custom-value"))
Expect(machine.Labels).To(HaveKeyWithValue("other-custom-label", "other-custom-value"))
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
// Expect Node to have all the labels that the Machine has
for k, v := range machine.Labels {
Expect(node.Labels).To(HaveKeyWithValue(k, v))
}
})
It("should sync the annotations to the Node when the Node comes online", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
Annotations: map[string]string{
v1alpha5.DoNotConsolidateNodeAnnotationKey: "true",
"my-custom-annotation": "my-custom-value",
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(machine.Annotations).To(HaveKeyWithValue(v1alpha5.DoNotConsolidateNodeAnnotationKey, "true"))
Expect(machine.Annotations).To(HaveKeyWithValue("my-custom-annotation", "my-custom-value"))
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
// Expect Node to have all the annotations that the Machine has
for k, v := range machine.Annotations {
Expect(node.Annotations).To(HaveKeyWithValue(k, v))
}
})
It("should sync the taints to the Node when the Node comes online", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Taints: []v1.Taint{
{
Key: "custom-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-value",
},
{
Key: "other-custom-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(machine.Spec.Taints).To(ContainElements(
v1.Taint{
Key: "custom-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-value",
},
v1.Taint{
Key: "other-custom-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-value",
},
))
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
Expect(node.Spec.Taints).To(ContainElements(
v1.Taint{
Key: "custom-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-value",
},
v1.Taint{
Key: "other-custom-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-value",
},
))
})
It("should sync the startupTaints to the Node when the Node comes online", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
Taints: []v1.Taint{
{
Key: "custom-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-value",
},
{
Key: "other-custom-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-value",
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
Expect(machine.Spec.StartupTaints).To(ContainElements(
v1.Taint{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
v1.Taint{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
))
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
Expect(node.Spec.Taints).To(ContainElements(
v1.Taint{
Key: "custom-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-value",
},
v1.Taint{
Key: "other-custom-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-value",
},
v1.Taint{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
v1.Taint{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
))
})
It("should not re-sync the startupTaints to the Node when the startupTaints are removed", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
Spec: v1alpha5.MachineSpec{
StartupTaints: []v1.Taint{
{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
node := test.Node(test.NodeOptions{ProviderID: machine.Status.ProviderID})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
Expect(node.Spec.Taints).To(ContainElements(
v1.Taint{
Key: "custom-startup-taint",
Effect: v1.TaintEffectNoSchedule,
Value: "custom-startup-value",
},
v1.Taint{
Key: "other-custom-startup-taint",
Effect: v1.TaintEffectNoExecute,
Value: "other-custom-startup-value",
},
))
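// Simulate the startup taints being removed from the Node out-of-band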
node.Spec.Taints = []v1.Taint{}
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node = ExpectExists(ctx, env.Client, node)
Expect(node.Spec.Taints).To(HaveLen(0))
})
})
| 314 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle_test
import (
"context"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clock "k8s.io/utils/clock/testing"
. "knative.dev/pkg/logging/testing"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
machinelifecycle "github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
. "github.com/aws/karpenter-core/pkg/test/expectations"
"github.com/aws/karpenter-core/pkg/test"
)
var ctx context.Context
var machineController controller.Controller
var env *test.Environment
var fakeClock *clock.FakeClock
var cloudProvider *fake.CloudProvider
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Machine")
}
var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...), test.WithFieldIndexers(func(c cache.Cache) error {
return c.IndexField(ctx, &v1.Node{}, "spec.providerID", func(obj client.Object) []string {
return []string{obj.(*v1.Node).Spec.ProviderID}
})
}))
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
machineController = machinelifecycle.NewController(fakeClock, env.Client, cloudProvider)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = AfterEach(func() {
fakeClock.SetTime(time.Now())
ExpectCleanedUp(ctx, env.Client)
cloudProvider.Reset()
})
var _ = Describe("Finalizer", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = test.Provisioner()
})
It("should add the finalizer if it doesn't exist", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
_, ok := lo.Find(machine.Finalizers, func(f string) bool {
return f == v1alpha5.TerminationFinalizer
})
Expect(ok).To(BeTrue())
})
})
| 103 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination
import (
"context"
"fmt"
"time"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/client-go/util/workqueue"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
)
var _ corecontroller.FinalizingTypedController[*v1alpha5.Machine] = (*Controller)(nil)
// Controller is a Machine Termination controller that triggers deletion of the Node and the
// CloudProvider Machine through its graceful termination mechanism
type Controller struct {
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
}
// NewController is a constructor for the Machine Controller
func NewController(kubeClient client.Client, cloudProvider cloudprovider.CloudProvider) corecontroller.Controller {
return corecontroller.Typed[*v1alpha5.Machine](kubeClient, &Controller{
kubeClient: kubeClient,
cloudProvider: cloudProvider,
})
}
func (*Controller) Name() string {
return "machine.termination"
}
func (c *Controller) Reconcile(_ context.Context, _ *v1alpha5.Machine) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
func (c *Controller) Finalize(ctx context.Context, machine *v1alpha5.Machine) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", machine.Status.NodeName, "provisioner", machine.Labels[v1alpha5.ProvisionerNameLabelKey], "provider-id", machine.Status.ProviderID))
stored := machine.DeepCopy()
if !controllerutil.ContainsFinalizer(machine, v1alpha5.TerminationFinalizer) {
return reconcile.Result{}, nil
}
nodes, err := machineutil.AllNodesForMachine(ctx, c.kubeClient, machine)
if err != nil {
return reconcile.Result{}, err
}
for _, node := range nodes {
// We delete nodes to trigger the node finalization and deletion flow
if err = c.kubeClient.Delete(ctx, node); client.IgnoreNotFound(err) != nil {
return reconcile.Result{}, err
}
}
// We wait until all the nodes associated with this machine have completed their deletion before triggering the finalization of the machine
if len(nodes) > 0 {
return reconcile.Result{}, nil
}
if machine.Status.ProviderID != "" || machine.Annotations[v1alpha5.MachineLinkedAnnotationKey] != "" {
if err := c.cloudProvider.Delete(ctx, machine); cloudprovider.IgnoreMachineNotFoundError(err) != nil {
return reconcile.Result{}, fmt.Errorf("terminating cloudprovider instance, %w", err)
}
}
controllerutil.RemoveFinalizer(machine, v1alpha5.TerminationFinalizer)
if !equality.Semantic.DeepEqual(stored, machine) {
if err := c.kubeClient.Patch(ctx, machine, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(fmt.Errorf("removing machine termination finalizer, %w", err))
}
logging.FromContext(ctx).Infof("deleted machine")
}
return reconcile.Result{}, nil
}
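// Builder registers the controller to reconcile Machines and to watch Node deletion events, which are
// mapped back to the owning Machine so finalization can proceed once all linked Nodes are gone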
func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1alpha5.Machine{}).
WithEventFilter(predicate.GenerationChangedPredicate{}).
Watches(
&source.Kind{Type: &v1.Node{}},
machineutil.NodeEventHandler(ctx, c.kubeClient),
// Watch for node deletion events
builder.WithPredicates(predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool { return false },
UpdateFunc: func(e event.UpdateEvent) bool { return false },
DeleteFunc: func(e event.DeleteEvent) bool { return true },
}),
).
WithOptions(controller.Options{
RateLimiter: workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(time.Second, time.Minute),
// 10 qps, 100 bucket size
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
),
MaxConcurrentReconciles: 100, // higher concurrency limit since we want fast reaction to termination
}))
}
| 128 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination_test
import (
"context"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clock "k8s.io/utils/clock/testing"
. "knative.dev/pkg/logging/testing"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
machinelifecycle "github.com/aws/karpenter-core/pkg/controllers/machine/lifecycle"
machinetermination "github.com/aws/karpenter-core/pkg/controllers/machine/termination"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
. "github.com/aws/karpenter-core/pkg/test/expectations"
"github.com/aws/karpenter-core/pkg/test"
)
var ctx context.Context
var env *test.Environment
var fakeClock *clock.FakeClock
var cloudProvider *fake.CloudProvider
var machineController controller.Controller
var terminationController controller.Controller
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Machine")
}
var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...), test.WithFieldIndexers(func(c cache.Cache) error {
return c.IndexField(ctx, &v1.Node{}, "spec.providerID", func(obj client.Object) []string {
return []string{obj.(*v1.Node).Spec.ProviderID}
})
}))
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
machineController = machinelifecycle.NewController(fakeClock, env.Client, cloudProvider)
terminationController = machinetermination.NewController(env.Client, cloudProvider)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = AfterEach(func() {
fakeClock.SetTime(time.Now())
ExpectCleanedUp(ctx, env.Client)
cloudProvider.Reset()
})
var _ = Describe("Termination", func() {
var provisioner *v1alpha5.Provisioner
var machine *v1alpha5.Machine
BeforeEach(func() {
provisioner = test.Provisioner()
machine = test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
Finalizers: []string{
v1alpha5.TerminationFinalizer,
},
},
Spec: v1alpha5.MachineSpec{
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourcePods: resource.MustParse("5"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
},
})
})
It("should delete the node and the CloudProvider Machine when Machine deletion is triggered", func() {
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
_, err := cloudProvider.Get(ctx, machine.Status.ProviderID)
Expect(err).ToNot(HaveOccurred())
node := test.MachineLinkedNode(machine)
ExpectApplied(ctx, env.Client, node)
// Expect the node and the machine to both be gone
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // triggers the node deletion
ExpectFinalizersRemoved(ctx, env.Client, node)
ExpectNotFound(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // now all nodes are gone so machine deletion continues
ExpectNotFound(ctx, env.Client, machine, node)
// Expect the machine to be gone from the cloudprovider
_, err = cloudProvider.Get(ctx, machine.Status.ProviderID)
Expect(cloudprovider.IsMachineNotFoundError(err)).To(BeTrue())
})
It("should delete multiple Nodes if multiple Nodes map to the Machine", func() {
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
_, err := cloudProvider.Get(ctx, machine.Status.ProviderID)
Expect(err).ToNot(HaveOccurred())
node1 := test.MachineLinkedNode(machine)
node2 := test.MachineLinkedNode(machine)
node3 := test.MachineLinkedNode(machine)
ExpectApplied(ctx, env.Client, node1, node2, node3)
// Expect the node and the machine to both be gone
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // triggers the node deletion
ExpectFinalizersRemoved(ctx, env.Client, node1, node2, node3)
ExpectNotFound(ctx, env.Client, node1, node2, node3)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // now all nodes are gone so machine deletion continues
ExpectNotFound(ctx, env.Client, machine, node1, node2, node3)
// Expect the machine to be gone from the cloudprovider
_, err = cloudProvider.Get(ctx, machine.Status.ProviderID)
Expect(cloudprovider.IsMachineNotFoundError(err)).To(BeTrue())
})
It("should delete the Node if the Machine is linked but doesn't have its providerID resolved yet", func() {
node := test.MachineLinkedNode(machine)
machine.Annotations = lo.Assign(machine.Annotations, map[string]string{v1alpha5.MachineLinkedAnnotationKey: machine.Status.ProviderID})
machine.Status.ProviderID = ""
ExpectApplied(ctx, env.Client, provisioner, machine, node)
// Expect the machine to be gone
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // triggers the node deletion
ExpectFinalizersRemoved(ctx, env.Client, node)
ExpectNotFound(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // now all nodes are gone so machine deletion continues
ExpectNotFound(ctx, env.Client, machine, node)
// Expect the machine to be gone from the cloudprovider
_, err := cloudProvider.Get(ctx, machine.Annotations[v1alpha5.MachineLinkedAnnotationKey])
Expect(cloudprovider.IsMachineNotFoundError(err)).To(BeTrue())
})
It("should not delete the Machine until all the Nodes are removed", func() {
ExpectApplied(ctx, env.Client, provisioner, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
machine = ExpectExists(ctx, env.Client, machine)
_, err := cloudProvider.Get(ctx, machine.Status.ProviderID)
Expect(err).ToNot(HaveOccurred())
node := test.MachineLinkedNode(machine)
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // triggers the node deletion
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // the node still hasn't been deleted, so the machine should remain
ExpectExists(ctx, env.Client, machine)
ExpectExists(ctx, env.Client, node)
ExpectFinalizersRemoved(ctx, env.Client, node)
ExpectNotFound(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine)) // now the machine should be gone
ExpectNotFound(ctx, env.Client, machine)
})
It("should not call Delete() on the CloudProvider if the machine hasn't been launched yet", func() {
machine.Status.ProviderID = ""
ExpectApplied(ctx, env.Client, provisioner, machine)
// Expect the machine to be gone
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine))
Expect(cloudProvider.DeleteCalls).To(HaveLen(0))
ExpectNotFound(ctx, env.Client, machine)
})
It("should not delete nodes without provider ids if the machine hasn't been launched yet", func() {
// Generate 10 nodes, none of which have a provider id
var nodes []*v1.Node
for i := 0; i < 10; i++ {
nodes = append(nodes, test.Node())
}
ExpectApplied(ctx, env.Client, lo.Map(nodes, func(n *v1.Node, _ int) client.Object { return n })...)
ExpectApplied(ctx, env.Client, provisioner, machine)
// Expect the machine to be gone
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(machine))
ExpectNotFound(ctx, env.Client, machine)
for _, node := range nodes {
ExpectExists(ctx, env.Client, node)
}
})
})
| 235 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"context"
"fmt"
"strings"
"sync"
"github.com/prometheus/client_golang/prometheus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/operator/controller"
)
const (
podName = "name"
podNameSpace = "namespace"
ownerSelfLink = "owner"
podHostName = "node"
podProvisioner = "provisioner"
podHostZone = "zone"
podHostArchitecture = "arch"
podHostCapacityType = "capacity_type"
podHostInstanceType = "instance_type"
podPhase = "phase"
phasePending = "Pending"
)
var (
podGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "pods",
Name: "state",
Help: "Pod state is the current state of pods. This metric can be used several ways as it is labeled by the pod name, namespace, owner, node, provisioner name, zone, architecture, capacity type, instance type and pod phase.",
},
labelNames(),
)
podStartupTimeSummary = prometheus.NewSummary(
prometheus.SummaryOpts{
Namespace: "karpenter",
Subsystem: "pods",
Name: "startup_time_seconds",
Help: "The time from pod creation until the pod is running.",
Objectives: metrics.SummaryObjectives(),
},
)
)
// Controller records pod state and startup time metrics
type Controller struct {
kubeClient client.Client
// labelsMap keeps track of the pod gauge for gauge deletion
// podKey (types.NamespacedName) -> prometheus.Labels
labelsMap sync.Map
pendingPods sets.String
}
func init() {
crmetrics.Registry.MustRegister(podGaugeVec)
crmetrics.Registry.MustRegister(podStartupTimeSummary)
}
func labelNames() []string {
return []string{
podName,
podNameSpace,
ownerSelfLink,
podHostName,
podProvisioner,
podHostZone,
podHostArchitecture,
podHostCapacityType,
podHostInstanceType,
podPhase,
}
}
// NewController constructs a podController instance
func NewController(kubeClient client.Client) controller.Controller {
return &Controller{
kubeClient: kubeClient,
pendingPods: sets.NewString(),
}
}
func (c *Controller) Name() string {
return "pod_metrics"
}
// Reconcile removes the previously recorded gauge for the pod and records its current state and startup time metrics
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("pod", req.Name))
// Remove the previous gauge on CREATE/UPDATE/DELETE
c.cleanup(req.NamespacedName)
// Retrieve pod from reconcile request
pod := &v1.Pod{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, pod); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
c.record(ctx, pod)
return reconcile.Result{}, nil
}
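// cleanup deletes the gauge previously recorded for the pod, if any, and removes its labels from the label map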
func (c *Controller) cleanup(podKey types.NamespacedName) {
if labels, ok := c.labelsMap.Load(podKey); ok {
podGaugeVec.Delete(labels.(prometheus.Labels))
c.labelsMap.Delete(podKey)
}
}
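// record sets the pod state gauge with the pod's current labels and, once a previously pending pod has become ready, observes its startup time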
func (c *Controller) record(ctx context.Context, pod *v1.Pod) {
// Record pods state metric
labels := c.makeLabels(ctx, pod)
podGaugeVec.With(labels).Set(float64(1))
c.labelsMap.Store(client.ObjectKeyFromObject(pod), labels)
// Record pods startup time metric
var condition *v1.PodCondition
for i := range pod.Status.Conditions {
if pod.Status.Conditions[i].Type == v1.PodReady {
condition = &pod.Status.Conditions[i]
}
}
podKey := client.ObjectKeyFromObject(pod).String()
if pod.Status.Phase == phasePending {
c.pendingPods.Insert(podKey)
} else if c.pendingPods.Has(podKey) && condition != nil {
podStartupTimeSummary.Observe(condition.LastTransitionTime.Sub(pod.CreationTimestamp.Time).Seconds())
c.pendingPods.Delete(podKey)
}
}
// makeLabels creates the metric labels using the current state of the pod
func (c *Controller) makeLabels(ctx context.Context, pod *v1.Pod) prometheus.Labels {
metricLabels := prometheus.Labels{}
metricLabels[podName] = pod.GetName()
metricLabels[podNameSpace] = pod.GetNamespace()
// SelfLink has been deprecated since v1.20,
// so we manually generate the selflink for the first owner reference.
// Multiple owner references are not currently supported.
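// e.g. a pod owned by a ReplicaSet "my-rs" in "default" would get "/apis/apps/v1/namespaces/default/replicasets/my-rs" (illustrative example)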
selflink := ""
if len(pod.GetOwnerReferences()) > 0 {
ownerreference := pod.GetOwnerReferences()[0]
selflink = fmt.Sprintf("/apis/%s/namespaces/%s/%ss/%s", ownerreference.APIVersion, pod.Namespace, strings.ToLower(ownerreference.Kind), ownerreference.Name)
}
metricLabels[ownerSelfLink] = selflink
metricLabels[podHostName] = pod.Spec.NodeName
metricLabels[podPhase] = string(pod.Status.Phase)
node := &v1.Node{}
if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: pod.Spec.NodeName}, node); err != nil {
metricLabels[podHostZone] = "N/A"
metricLabels[podHostArchitecture] = "N/A"
metricLabels[podHostCapacityType] = "N/A"
metricLabels[podHostInstanceType] = "N/A"
if provisionerName, ok := pod.Spec.NodeSelector[v1alpha5.ProvisionerNameLabelKey]; ok {
metricLabels[podProvisioner] = provisionerName
} else {
metricLabels[podProvisioner] = "N/A"
}
} else {
metricLabels[podHostZone] = node.Labels[v1.LabelTopologyZone]
metricLabels[podHostArchitecture] = node.Labels[v1.LabelArchStable]
if capacityType, ok := node.Labels[v1alpha5.LabelCapacityType]; !ok {
metricLabels[podHostCapacityType] = "N/A"
} else {
metricLabels[podHostCapacityType] = capacityType
}
metricLabels[podHostInstanceType] = node.Labels[v1.LabelInstanceTypeStable]
if provisionerName, ok := node.Labels[v1alpha5.ProvisionerNameLabelKey]; !ok {
metricLabels[podProvisioner] = "N/A"
} else {
metricLabels[podProvisioner] = provisionerName
}
}
return metricLabels
}
func (c *Controller) Builder(_ context.Context, m manager.Manager) controller.Builder {
return controller.Adapt(
controllerruntime.
NewControllerManagedBy(m).
For(&v1.Pod{}),
)
}
| 217 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod_test
import (
"context"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
. "knative.dev/pkg/logging/testing"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/controllers/metrics/pod"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var podController controller.Controller
var ctx context.Context
var env *test.Environment
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controllers/Metrics/Pod")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme)
podController = pod.NewController(env.Client)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = Describe("Pod Metrics", func() {
It("should update the pod state metrics", func() {
p := test.Pod()
ExpectApplied(ctx, env.Client, p)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(p))
_, found := FindMetricWithLabelValues("karpenter_pods_state", map[string]string{
"name": p.GetName(),
"namespace": p.GetNamespace(),
})
Expect(found).To(BeTrue())
})
It("should update the pod state metrics with pod phase", func() {
p := test.Pod()
ExpectApplied(ctx, env.Client, p)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(p))
_, found := FindMetricWithLabelValues("karpenter_pods_state", map[string]string{
"name": p.GetName(),
"namespace": p.GetNamespace(),
})
Expect(found).To(BeTrue())
p.Status.Phase = v1.PodRunning
ExpectApplied(ctx, env.Client, p)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(p))
_, found = FindMetricWithLabelValues("karpenter_pods_state", map[string]string{
"name": p.GetName(),
"namespace": p.GetNamespace(),
"phase": string(p.Status.Phase),
})
Expect(found).To(BeTrue())
})
It("should delete the pod state metric on pod delete", func() {
p := test.Pod()
ExpectApplied(ctx, env.Client, p)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(p))
ExpectDeleted(ctx, env.Client, p)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(p))
_, found := FindMetricWithLabelValues("karpenter_pods_state", map[string]string{
"name": p.GetName(),
"namespace": p.GetNamespace(),
})
Expect(found).To(BeFalse())
})
})
| 102 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioner
import (
"context"
"fmt"
"strings"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"knative.dev/pkg/logging"
"github.com/prometheus/client_golang/prometheus"
v1 "k8s.io/api/core/v1"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
)
const (
provisionerResourceType = "resource_type"
provisionerName = "provisioner"
)
var (
limitGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "provisioner",
Name: "limit",
Help: "The Provisioner Limits are the limits specified on the provisioner that restrict the quantity of resources provisioned. Labeled by provisioner name and resource type.",
},
labelNames(),
)
usageGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "provisioner",
Name: "usage",
Help: "The Provisioner Usage is the amount of resources that have been provisioned by a particular provisioner. Labeled by provisioner name and resource type.",
},
labelNames(),
)
usagePctGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "provisioner",
Name: "usage_pct",
Help: "The Provisioner Usage Percentage is the percentage of each resource used based on the resources provisioned and the limits that have been configured in the range [0,100]. Labeled by provisioner name and resource type.",
},
labelNames(),
)
)
func init() {
crmetrics.Registry.MustRegister(limitGaugeVec)
crmetrics.Registry.MustRegister(usageGaugeVec)
crmetrics.Registry.MustRegister(usagePctGaugeVec)
}
func labelNames() []string {
return []string{
provisionerResourceType,
provisionerName,
}
}
type Controller struct {
kubeClient client.Client
// labelCollection keeps track of gauges for gauge deletion
// provisionerKey (types.NamespacedName) -> []prometheus.Labels
labelCollection sync.Map
}
// NewController constructs a controller instance
func NewController(kubeClient client.Client) corecontroller.Controller {
return &Controller{
kubeClient: kubeClient,
}
}
func (c *Controller) Name() string {
return "provisioner_metrics"
}
// Reconcile records the limit, usage, and usage percentage metrics for the provisioner
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("provisioner", req.Name))
// Remove the previous gauge on CREATE/UPDATE/DELETE
c.cleanup(req.NamespacedName)
// Retrieve provisioner from reconcile request
provisioner := &v1alpha5.Provisioner{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, provisioner); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
c.record(ctx, provisioner)
// periodically update our metrics per provisioner even if nothing has changed
return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
}
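// cleanup deletes the limit, usage, and usage percentage gauges previously recorded for the provisioner and removes them from the label collection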
func (c *Controller) cleanup(provisionerKey types.NamespacedName) {
if labelSet, ok := c.labelCollection.Load(provisionerKey); ok {
for _, labels := range labelSet.([]prometheus.Labels) {
limitGaugeVec.Delete(labels)
usageGaugeVec.Delete(labels)
usagePctGaugeVec.Delete(labels)
}
c.labelCollection.Delete(provisionerKey)
}
}
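// record publishes the usage gauge from the provisioner status and, when limits are configured, the limit and usage percentage gauges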
func (c *Controller) record(ctx context.Context, provisioner *v1alpha5.Provisioner) {
if err := c.set(provisioner, provisioner.Status.Resources, usageGaugeVec); err != nil {
logging.FromContext(ctx).Errorf("generating gauge: %s", err)
}
if provisioner.Spec.Limits == nil {
// can't generate our limits or usagePct gauges if there are no limits
return
}
if err := c.set(provisioner, provisioner.Spec.Limits.Resources, limitGaugeVec); err != nil {
logging.FromContext(ctx).Errorf("generating gauge: %s", err)
}
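// Compute the percentage of each limited resource that is currently in use; a zero limit is reported as 100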
usage := v1.ResourceList{}
for k, v := range provisioner.Spec.Limits.Resources {
limitValue := v.AsApproximateFloat64()
usedValue := provisioner.Status.Resources[k]
if limitValue == 0 {
usage[k] = *resource.NewQuantity(100, resource.DecimalSI)
} else {
usage[k] = *resource.NewQuantity(int64(usedValue.AsApproximateFloat64()/limitValue*100), resource.DecimalSI)
}
}
if err := c.set(provisioner, usage, usagePctGaugeVec); err != nil {
logging.FromContext(ctx).Errorf("generating gauge: %s", err)
}
}
// set updates the value of the provisioner gauge for each resource type passed through the resource list
func (c *Controller) set(provisioner *v1alpha5.Provisioner, resourceList v1.ResourceList, gaugeVec *prometheus.GaugeVec) error {
provisionerKey := client.ObjectKeyFromObject(provisioner)
var labels []prometheus.Labels
for resourceName, quantity := range resourceList {
resourceTypeName := strings.ReplaceAll(strings.ToLower(string(resourceName)), "-", "_")
label := c.makeLabels(provisioner, resourceTypeName)
labels = append(labels, label)
// Store the label we are about to add to the label collection
c.labelCollection.Store(provisionerKey, labels)
// get the existing gauge or create a new one if it doesn't exist
gauge, err := gaugeVec.GetMetricWith(label)
if err != nil {
return fmt.Errorf("creating or getting gauge: %w", err)
}
if resourceName == v1.ResourceCPU {
gauge.Set(float64(quantity.MilliValue()) / float64(1000))
} else {
gauge.Set(float64(quantity.Value()))
}
}
return nil
}
func (c *Controller) makeLabels(provisioner *v1alpha5.Provisioner, resourceTypeName string) prometheus.Labels {
return prometheus.Labels{
provisionerResourceType: resourceTypeName,
provisionerName: provisioner.Name,
}
}
func (c *Controller) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(
controllerruntime.
NewControllerManagedBy(m).
For(&v1alpha5.Provisioner{}),
)
}
| 203 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioner_test
import (
"context"
"strings"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
. "knative.dev/pkg/logging/testing"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/metrics/provisioner"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var provisionerController controller.Controller
var ctx context.Context
var env *test.Environment
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controllers/Metrics/Provisioner")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
provisionerController = provisioner.NewController(env.Client)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = Describe("Provisioner Metrics", func() {
It("should update the provisioner limit metrics", func() {
limits := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
}
provisioner := test.Provisioner(test.ProvisionerOptions{
Limits: limits,
})
ExpectApplied(ctx, env.Client, provisioner)
ExpectReconcileSucceeded(ctx, provisionerController, client.ObjectKeyFromObject(provisioner))
for k, v := range limits {
m, found := FindMetricWithLabelValues("karpenter_provisioner_limit", map[string]string{
"provisioner": provisioner.GetName(),
"resource_type": strings.ReplaceAll(k.String(), "-", "_"),
})
Expect(found).To(BeTrue())
Expect(m.GetGauge().GetValue()).To(BeNumerically("~", v.AsApproximateFloat64()))
}
})
It("should update the provisioner usage metrics", func() {
resources := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
}
provisioner := test.Provisioner(test.ProvisionerOptions{
Status: v1alpha5.ProvisionerStatus{
Resources: resources,
},
})
ExpectApplied(ctx, env.Client, provisioner)
ExpectReconcileSucceeded(ctx, provisionerController, client.ObjectKeyFromObject(provisioner))
for k, v := range resources {
m, found := FindMetricWithLabelValues("karpenter_provisioner_usage", map[string]string{
"provisioner": provisioner.GetName(),
"resource_type": strings.ReplaceAll(k.String(), "-", "_"),
})
Expect(found).To(BeTrue())
Expect(m.GetGauge().GetValue()).To(BeNumerically("~", v.AsApproximateFloat64()))
}
})
It("should update the usage percentage metrics correctly", func() {
resources := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
}
limits := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("1000Gi"),
}
provisioner := test.Provisioner(test.ProvisionerOptions{
Limits: limits,
Status: v1alpha5.ProvisionerStatus{
Resources: resources,
},
})
ExpectApplied(ctx, env.Client, provisioner)
ExpectReconcileSucceeded(ctx, provisionerController, client.ObjectKeyFromObject(provisioner))
for k := range resources {
m, found := FindMetricWithLabelValues("karpenter_provisioner_usage_pct", map[string]string{
"provisioner": provisioner.GetName(),
"resource_type": strings.ReplaceAll(k.String(), "-", "_"),
})
Expect(found).To(BeTrue())
Expect(m.GetGauge().GetValue()).To(BeNumerically("~", 10))
}
})
It("should delete the provisioner state metrics on provisioner delete", func() {
expectedMetrics := []string{"karpenter_provisioner_limit", "karpenter_provisioner_usage", "karpenter_provisioner_usage_pct"}
provisioner := test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("1000Gi"),
},
Status: v1alpha5.ProvisionerStatus{
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
},
},
})
ExpectApplied(ctx, env.Client, provisioner)
ExpectReconcileSucceeded(ctx, provisionerController, client.ObjectKeyFromObject(provisioner))
for _, name := range expectedMetrics {
_, found := FindMetricWithLabelValues(name, map[string]string{
"provisioner": provisioner.GetName(),
})
Expect(found).To(BeTrue())
}
ExpectDeleted(ctx, env.Client, provisioner)
ExpectReconcileSucceeded(ctx, provisionerController, client.ObjectKeyFromObject(provisioner))
for _, name := range expectedMetrics {
_, found := FindMetricWithLabelValues(name, map[string]string{
"provisioner": provisioner.GetName(),
})
Expect(found).To(BeFalse())
}
})
})
| 170 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"context"
"time"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/controllers/metrics/state/scraper"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/operator/controller"
)
const pollingPeriod = 5 * time.Second
type Controller struct {
cluster *state.Cluster
scrapers []scraper.Scraper
}
func NewController(cluster *state.Cluster) *Controller {
return &Controller{
cluster: cluster,
scrapers: []scraper.Scraper{scraper.NewNodeScraper(cluster)},
}
}
func (c *Controller) Name() string {
return "metric_scraper"
}
func (c *Controller) Builder(_ context.Context, mgr manager.Manager) controller.Builder {
return controller.NewSingletonManagedBy(mgr)
}
func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
for _, scraper := range c.scrapers {
scraper.Scrape(ctx)
}
return reconcile.Result{RequeueAfter: pollingPeriod}, nil
}
| 57 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics_test
import (
"context"
"testing"
"time"
"k8s.io/apimachinery/pkg/types"
clock "k8s.io/utils/clock/testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
metricsstate "github.com/aws/karpenter-core/pkg/controllers/metrics/state"
"github.com/aws/karpenter-core/pkg/controllers/state/informer"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var fakeClock *clock.FakeClock
var env *test.Environment
var cluster *state.Cluster
var nodeController controller.Controller
var metricsStateController controller.Controller
var cloudProvider *fake.CloudProvider
var provisioner *v1alpha5.Provisioner
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controllers/Metrics/State")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
cloudProvider.InstanceTypes = fake.InstanceTypesAssorted()
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
provisioner = test.Provisioner(test.ProvisionerOptions{ObjectMeta: metav1.ObjectMeta{Name: "default"}})
nodeController = informer.NewNodeController(env.Client, cluster)
metricsStateController = metricsstate.NewController(cluster)
ExpectApplied(ctx, env.Client, provisioner)
})
var _ = AfterSuite(func() {
ExpectCleanedUp(ctx, env.Client)
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = Describe("Node Metrics", func() {
It("should update the allocatable metric", func() {
resources := v1.ResourceList{
v1.ResourcePods: resource.MustParse("100"),
v1.ResourceCPU: resource.MustParse("5000"),
v1.ResourceMemory: resource.MustParse("32Gi"),
}
node := test.Node(test.NodeOptions{Allocatable: resources})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, metricsStateController, types.NamespacedName{})
for k, v := range resources {
metric, found := FindMetricWithLabelValues("karpenter_nodes_allocatable", map[string]string{
"node_name": node.GetName(),
"resource_type": k.String(),
})
Expect(found).To(BeTrue())
Expect(metric.GetGauge().GetValue()).To(BeNumerically("~", v.AsApproximateFloat64()))
}
})
It("should remove the node metric gauge when the node is deleted", func() {
resources := v1.ResourceList{
v1.ResourcePods: resource.MustParse("100"),
v1.ResourceCPU: resource.MustParse("5000"),
v1.ResourceMemory: resource.MustParse("32Gi"),
}
node := test.Node(test.NodeOptions{Allocatable: resources})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, metricsStateController, types.NamespacedName{})
_, found := FindMetricWithLabelValues("karpenter_nodes_allocatable", map[string]string{
"node_name": node.GetName(),
})
Expect(found).To(BeTrue())
ExpectDeleted(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, metricsStateController, types.NamespacedName{})
_, found = FindMetricWithLabelValues("karpenter_nodes_allocatable", map[string]string{
"node_name": node.GetName(),
})
Expect(found).To(BeFalse())
})
})
| 132 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scraper
import (
"bytes"
"context"
"sort"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
const (
resourceType = "resource_type"
nodeName = "node_name"
nodeProvisioner = "provisioner"
nodePhase = "phase"
)
var (
allocatableGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "allocatable",
Help: "Node allocatable are the resources allocatable by nodes.",
},
nodeLabelNames(),
)
podRequestsGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "total_pod_requests",
Help: "Node total pod requests are the resources requested by non-DaemonSet pods bound to nodes.",
},
nodeLabelNames(),
)
podLimitsGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "total_pod_limits",
Help: "Node total pod limits are the resources specified by non-DaemonSet pod limits.",
},
nodeLabelNames(),
)
daemonRequestsGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "total_daemon_requests",
Help: "Node total daemon requests are the resource requested by DaemonSet pods bound to nodes.",
},
nodeLabelNames(),
)
daemonLimitsGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "total_daemon_limits",
Help: "Node total daemon limits are the resources specified by DaemonSet pod limits.",
},
nodeLabelNames(),
)
overheadGaugeVec = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "system_overhead",
Help: "Node system daemon overhead are the resources reserved for system overhead, the difference between the node's capacity and allocatable values are reported by the status.",
},
nodeLabelNames(),
)
wellKnownLabels = getWellKnownLabels()
)
func nodeLabelNames() []string {
return append(
lo.Values(wellKnownLabels),
resourceType,
nodeName,
nodeProvisioner,
nodePhase,
)
}
func forEachGaugeVec(f func(*prometheus.GaugeVec)) {
for _, gauge := range []*prometheus.GaugeVec{
allocatableGaugeVec,
podRequestsGaugeVec,
podLimitsGaugeVec,
daemonRequestsGaugeVec,
daemonLimitsGaugeVec,
overheadGaugeVec,
} {
f(gauge)
}
}
func init() {
forEachGaugeVec(func(g *prometheus.GaugeVec) {
metrics.Registry.MustRegister(g)
})
}
type NodeScraper struct {
cluster *state.Cluster
gaugeLabels map[*prometheus.GaugeVec]map[string]prometheus.Labels
}
func NewNodeScraper(cluster *state.Cluster) *NodeScraper {
return &NodeScraper{
cluster: cluster,
gaugeLabels: func() map[*prometheus.GaugeVec]map[string]prometheus.Labels {
m := make(map[*prometheus.GaugeVec]map[string]prometheus.Labels)
forEachGaugeVec(func(g *prometheus.GaugeVec) {
m[g] = make(map[string]prometheus.Labels)
})
return m
}(),
}
}
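// Scrape publishes the per-node resource gauges for every node tracked in cluster state and deletes any gauges whose nodes no longer exist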
func (ns *NodeScraper) Scrape(_ context.Context) {
currentGaugeLabels := make(map[*prometheus.GaugeVec]sets.String)
forEachGaugeVec(func(g *prometheus.GaugeVec) {
currentGaugeLabels[g] = sets.NewString()
})
// Populate metrics
ns.cluster.ForEachNode(func(n *state.StateNode) bool {
if n.Node == nil {
return true
}
for gaugeVec, resourceList := range map[*prometheus.GaugeVec]v1.ResourceList{
overheadGaugeVec: ns.getSystemOverhead(n.Node),
podRequestsGaugeVec: resources.Subtract(n.PodRequests(), n.DaemonSetRequests()),
podLimitsGaugeVec: resources.Subtract(n.PodLimits(), n.DaemonSetLimits()),
daemonRequestsGaugeVec: n.DaemonSetRequests(),
daemonLimitsGaugeVec: n.DaemonSetLimits(),
allocatableGaugeVec: n.Node.Status.Allocatable,
} {
for _, labels := range ns.set(gaugeVec, n.Node, resourceList) {
key := labelsToString(labels)
ns.gaugeLabels[gaugeVec][key] = labels
currentGaugeLabels[gaugeVec].Insert(key)
}
}
return true
})
// Remove stale gauges
forEachGaugeVec(func(g *prometheus.GaugeVec) {
for labelsKey := range sets.NewString(lo.Keys(ns.gaugeLabels[g])...).Difference(currentGaugeLabels[g]) {
g.Delete(ns.gaugeLabels[g][labelsKey])
delete(ns.gaugeLabels[g], labelsKey)
}
})
}
// set sets the value of the node gauge for each resource in the resource list and returns the labels for the gauges that were set
func (ns *NodeScraper) set(gaugeVec *prometheus.GaugeVec, node *v1.Node, resourceList v1.ResourceList) []prometheus.Labels {
gaugeLabels := []prometheus.Labels{}
for resourceName, quantity := range resourceList {
// Reformat resource type to be consistent with Prometheus naming conventions (snake_case)
resourceLabels := ns.getNodeLabels(node, strings.ReplaceAll(strings.ToLower(string(resourceName)), "-", "_"))
gaugeLabels = append(gaugeLabels, resourceLabels)
if resourceName == v1.ResourceCPU {
gaugeVec.With(resourceLabels).Set(float64(quantity.MilliValue()) / float64(1000))
} else {
gaugeVec.With(resourceLabels).Set(float64(quantity.Value()))
}
}
return gaugeLabels
}
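// getSystemOverhead returns, for each resource, the node's capacity minus its allocatable value, i.e. the resources reserved for system daemons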
func (ns *NodeScraper) getSystemOverhead(node *v1.Node) v1.ResourceList {
systemOverhead := v1.ResourceList{}
if len(node.Status.Allocatable) > 0 {
// calculate the system daemon overhead for each resource
for resourceName, quantity := range node.Status.Allocatable {
overhead := node.Status.Capacity[resourceName]
overhead.Sub(quantity)
systemOverhead[resourceName] = overhead
}
}
return systemOverhead
}
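// getNodeLabels builds the prometheus labels for a node and resource type, including the node's well-known labels (defaulting to "N/A" when unset)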
func (ns *NodeScraper) getNodeLabels(node *v1.Node, resourceTypeName string) prometheus.Labels {
metricLabels := prometheus.Labels{}
metricLabels[resourceType] = resourceTypeName
metricLabels[nodeName] = node.GetName()
metricLabels[nodeProvisioner] = node.Labels[v1alpha5.ProvisionerNameLabelKey]
metricLabels[nodePhase] = string(node.Status.Phase)
// Populate well known labels
for wellKnownLabel, label := range wellKnownLabels {
if value, ok := node.Labels[wellKnownLabel]; !ok {
metricLabels[label] = "N/A"
} else {
metricLabels[label] = value
}
}
return metricLabels
}
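// getWellKnownLabels maps the well-known Karpenter scheduling label keys to snake_case metric label names, e.g. "karpenter.sh/capacity-type" -> "capacity_type"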
func getWellKnownLabels() map[string]string {
labels := make(map[string]string)
for wellKnownLabel := range v1alpha5.WellKnownLabels {
if parts := strings.Split(wellKnownLabel, "/"); len(parts) == 2 {
label := parts[1]
// Reformat label names to be consistent with Prometheus naming conventions (snake_case)
label = strings.ReplaceAll(strings.ToLower(label), "-", "_")
labels[wellKnownLabel] = label
}
}
return labels
}
func labelsToString(labels prometheus.Labels) string {
// this function is called often and shows up in profiling, so it's optimized
// a bit to run ~2x faster than a more standard approach
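// e.g. {"a": "1", "b": "2"} is rendered as a="1",b="2" (keys sorted for a stable representation)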
keyValues := make([]string, 0, len(labels))
sz := 0
for k, v := range labels {
keyValues = append(keyValues, k)
// len(key) + len(value) + 4 for the '="', '"' and ',' separator characters
sz += len(k) + len(v) + 4
}
sort.Strings(keyValues)
var buf bytes.Buffer
// grow the buffer to the size needed to avoid allocations
buf.Grow(sz)
for i, k := range keyValues {
if i > 0 {
buf.WriteByte(',')
}
// much faster to append a string than to format a string
buf.WriteString(k)
buf.WriteString("=\"")
buf.WriteString(labels[k])
buf.WriteString("\"")
}
return buf.String()
}
| 277 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scraper
import "context"
type Scraper interface {
Scrape(context.Context)
}
| 22 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"go.uber.org/multierr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/utils/result"
)
type nodeReconciler interface {
Reconcile(context.Context, *v1alpha5.Provisioner, *v1.Node) (reconcile.Result, error)
}
var _ corecontroller.TypedController[*v1.Node] = (*Controller)(nil)
// Controller manages a set of properties on karpenter provisioned nodes, such as
// taints, labels, finalizers.
type Controller struct {
kubeClient client.Client
cluster *state.Cluster
emptiness *Emptiness
drift *Drift
expiration *Expiration
}
// NewController constructs a nodeController instance
func NewController(clk clock.Clock, kubeClient client.Client, cloudProvider cloudprovider.CloudProvider, cluster *state.Cluster) corecontroller.Controller {
return corecontroller.Typed[*v1.Node](kubeClient, &Controller{
kubeClient: kubeClient,
cluster: cluster,
emptiness: &Emptiness{kubeClient: kubeClient, clock: clk, cluster: cluster},
drift: &Drift{kubeClient: kubeClient, cloudProvider: cloudProvider},
expiration: &Expiration{clock: clk},
})
}
func (c *Controller) Name() string {
return "node"
}
// Reconcile executes a reallocation control loop for the resource
func (c *Controller) Reconcile(ctx context.Context, node *v1.Node) (reconcile.Result, error) {
stored := node.DeepCopy()
if _, ok := node.Labels[v1alpha5.ProvisionerNameLabelKey]; !ok {
return reconcile.Result{}, nil
}
if !node.DeletionTimestamp.IsZero() {
return reconcile.Result{}, nil
}
provisioner := &v1alpha5.Provisioner{}
if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: node.Labels[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provisioner", provisioner.Name))
// Execute Reconcilers
var results []reconcile.Result
var errs error
reconcilers := []nodeReconciler{
c.emptiness,
c.expiration,
c.drift,
}
for _, reconciler := range reconcilers {
res, err := reconciler.Reconcile(ctx, provisioner, node)
errs = multierr.Append(errs, err)
results = append(results, res)
}
if !equality.Semantic.DeepEqual(stored, node) {
if err := c.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil {
return reconcile.Result{}, err
}
}
return result.Min(results...), errs
}
func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1.Node{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
Watches(
// Reconcile all nodes related to a provisioner when it changes.
&source.Kind{Type: &v1alpha5.Provisioner{}},
handler.EnqueueRequestsFromMapFunc(func(o client.Object) (requests []reconcile.Request) {
nodes := &v1.NodeList{}
if err := c.kubeClient.List(ctx, nodes, client.MatchingLabels(map[string]string{v1alpha5.ProvisionerNameLabelKey: o.GetName()})); err != nil {
logging.FromContext(ctx).Errorf("Failed to list nodes when mapping expiration watch events, %s", err)
return requests
}
for _, node := range nodes.Items {
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: node.Name}})
}
return requests
}),
).
Watches(
// Reconcile node when a pod assigned to it changes.
&source.Kind{Type: &v1.Pod{}},
handler.EnqueueRequestsFromMapFunc(func(o client.Object) (requests []reconcile.Request) {
if name := o.(*v1.Pod).Spec.NodeName; name != "" {
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: name}})
}
return requests
}),
))
}
| 141 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"fmt"
"time"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/utils/machine"
)
type Drift struct {
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
}
func (d *Drift) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisioner, node *v1.Node) (reconcile.Result, error) {
// node is not ready yet, so we don't consider it to be drifted
if node.Labels[v1alpha5.LabelNodeInitialized] != "true" {
return reconcile.Result{}, nil
}
// If the node is marked as voluntarily disrupted by another controller, do nothing.
val, hasAnnotation := node.Annotations[v1alpha5.VoluntaryDisruptionAnnotationKey]
if hasAnnotation && val != v1alpha5.VoluntaryDisruptionDriftedAnnotationValue {
return reconcile.Result{}, nil
}
// From here there are three scenarios to handle:
// 1. If drift is not enabled but the node is drifted, remove the annotation
// so another disruption controller can annotate the node.
if !settings.FromContext(ctx).DriftEnabled {
if val == v1alpha5.VoluntaryDisruptionDriftedAnnotationValue {
delete(node.Annotations, v1alpha5.VoluntaryDisruptionAnnotationKey)
logging.FromContext(ctx).Debugf("removing drift annotation from node as drift has been disabled")
}
return reconcile.Result{}, nil
}
drifted, err := d.cloudProvider.IsMachineDrifted(ctx, machine.NewFromNode(node))
if err != nil {
return reconcile.Result{}, cloudprovider.IgnoreMachineNotFoundError(fmt.Errorf("getting drift for node, %w", err))
}
// 2. Otherwise, if the node isn't drifted, but has the annotation, remove it.
if !drifted && hasAnnotation {
delete(node.Annotations, v1alpha5.VoluntaryDisruptionAnnotationKey)
logging.FromContext(ctx).Debugf("removing drift annotation from node")
// 3. Finally, if the node is drifted, but doesn't have the annotation, add it.
} else if drifted && !hasAnnotation {
node.Annotations = lo.Assign(node.Annotations, map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionDriftedAnnotationValue,
})
logging.FromContext(ctx).Debugf("annotating node as drifted")
}
// Requeue after 5 minutes for the cache TTL
return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
}
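// Illustrative summary (not part of the original source) of the cases handled above,
// assuming the annotation values behave as in Reconcile:
//
//   drift disabled, node annotated as drifted  -> annotation removed, no requeue
//   drift enabled, not drifted, annotated      -> annotation removed
//   drift enabled, drifted, not yet annotated  -> annotation added
//   anything else                              -> left unchanged
//
// When drift is enabled the reconciler always requeues after five minutes so the
// cloud provider's drift verdict is re-evaluated once its cache expires.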
| 80 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"fmt"
"time"
"k8s.io/utils/clock"
v1 "k8s.io/api/core/v1"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/samber/lo"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/utils/pod"
)
// Emptiness is a node sub-controller that annotates or de-annotates an empty node based on TTLSecondsAfterEmpty
type Emptiness struct {
kubeClient client.Client
clock clock.Clock
cluster *state.Cluster
}
// Reconcile reconciles the node
func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisioner, n *v1.Node) (reconcile.Result, error) {
// Ignore node if not applicable
if provisioner.Spec.TTLSecondsAfterEmpty == nil {
return reconcile.Result{}, nil
}
// node is not initialized yet, so we don't consider it to possibly be empty
if n.Labels[v1alpha5.LabelNodeInitialized] != "true" {
return reconcile.Result{}, nil
}
empty, err := r.isEmpty(ctx, n)
if err != nil {
return reconcile.Result{}, err
}
// If the node is nominated for pods per the last scheduling round, we don't consider it empty even if no pods
// are currently bound. We requeue shortly so we can re-check emptiness once the nomination window ends,
// since we don't watch node nomination events.
if r.cluster.IsNodeNominated(n.Name) {
return reconcile.Result{RequeueAfter: time.Second * 30}, nil
}
_, hasEmptinessTimestamp := n.Annotations[v1alpha5.EmptinessTimestampAnnotationKey]
if !empty && hasEmptinessTimestamp {
delete(n.Annotations, v1alpha5.EmptinessTimestampAnnotationKey)
logging.FromContext(ctx).Debugf("removed emptiness TTL from node")
} else if empty && !hasEmptinessTimestamp {
n.Annotations = lo.Assign(n.Annotations, map[string]string{
v1alpha5.EmptinessTimestampAnnotationKey: r.clock.Now().Format(time.RFC3339),
})
logging.FromContext(ctx).Debugf("added TTL to empty node")
}
return reconcile.Result{}, nil
}
func (r *Emptiness) isEmpty(ctx context.Context, n *v1.Node) (bool, error) {
pods := &v1.PodList{}
if err := r.kubeClient.List(ctx, pods, client.MatchingFields{"spec.nodeName": n.Name}); err != nil {
return false, fmt.Errorf("listing pods for node, %w", err)
}
for i := range pods.Items {
p := pods.Items[i]
if !pod.IsTerminal(&p) && !pod.IsOwnedByDaemonSet(&p) && !pod.IsOwnedByNode(&p) {
return false, nil
}
}
return true, nil
}
| 89 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/samber/lo"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
utilsnode "github.com/aws/karpenter-core/pkg/utils/node"
)
// Expiration is a node sub-controller that annotates or de-annotates an expired node based on TTLSecondsUntilExpired
type Expiration struct {
clock clock.Clock
}
func (e *Expiration) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisioner, node *v1.Node) (reconcile.Result, error) {
// If the node is marked as voluntarily disrupted by another controller, do nothing.
val, hasAnnotation := node.Annotations[v1alpha5.VoluntaryDisruptionAnnotationKey]
if hasAnnotation && val != v1alpha5.VoluntaryDisruptionExpiredAnnotationValue {
return reconcile.Result{}, nil
}
// From here there are three scenarios to handle:
// 1. If TTLSecondsUntilExpired is not configured, but the node is expired,
// remove the annotation so another disruption controller can annotate the node.
if provisioner.Spec.TTLSecondsUntilExpired == nil {
if val == v1alpha5.VoluntaryDisruptionExpiredAnnotationValue {
delete(node.Annotations, v1alpha5.VoluntaryDisruptionAnnotationKey)
logging.FromContext(ctx).Debugf("removing expiration annotation from node as expiration has been disabled")
}
return reconcile.Result{}, nil
}
// 2. Otherwise, if the node is expired, but doesn't have the annotation, add it.
expired := utilsnode.IsExpired(node, e.clock, provisioner)
if expired && !hasAnnotation {
node.Annotations = lo.Assign(node.Annotations, map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
})
logging.FromContext(ctx).Debugf("annotating node as expired")
return reconcile.Result{}, nil
}
// 3. Finally, if the node isn't expired, but has the annotation, remove it.
if !expired && hasAnnotation {
delete(node.Annotations, v1alpha5.VoluntaryDisruptionAnnotationKey)
logging.FromContext(ctx).Debugf("removing expiration annotation from node")
}
// If the node isn't expired and doesn't have the annotation, requeue at the time it will expire.
// Use t.Sub(time.Now()) instead of time.Until() to ensure we're using the injected clock.
return reconcile.Result{RequeueAfter: utilsnode.GetExpirationTime(node, provisioner).Sub(e.clock.Now())}, nil
}
| 73 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node_test
import (
"context"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clock "k8s.io/utils/clock/testing"
"knative.dev/pkg/ptr"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
. "github.com/aws/karpenter-core/pkg/test/expectations"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/node"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/test"
)
var ctx context.Context
var nodeController controller.Controller
var env *test.Environment
var fakeClock *clock.FakeClock
var cluster *state.Cluster
var cp *fake.CloudProvider
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Node")
}
var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = settings.ToContext(ctx, test.Settings())
cp = fake.NewCloudProvider()
cluster = state.NewCluster(fakeClock, env.Client, cp)
nodeController = node.NewController(fakeClock, env.Client, cp, cluster)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = Describe("Controller", func() {
var provisioner *v1alpha5.Provisioner
BeforeEach(func() {
provisioner = &v1alpha5.Provisioner{
ObjectMeta: metav1.ObjectMeta{Name: test.RandomName()},
Spec: v1alpha5.ProvisionerSpec{},
}
ctx = settings.ToContext(ctx, test.Settings(settings.Settings{DriftEnabled: true}))
})
AfterEach(func() {
fakeClock.SetTime(time.Now())
cp.Reset()
ExpectCleanedUp(ctx, env.Client)
})
Context("Drift", func() {
It("should not detect drift if the feature flag is disabled", func() {
cp.Drifted = true
ctx = settings.ToContext(ctx, test.Settings(settings.Settings{DriftEnabled: false}))
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: test.RandomName(),
},
},
})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectMakeNodesInitialized(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKeyWithValue(v1alpha5.VoluntaryDisruptionAnnotationKey, v1alpha5.VoluntaryDisruptionDriftedAnnotationValue))
})
It("should not detect drift if the provisioner does not exist", func() {
cp.Drifted = true
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: test.RandomName(),
},
},
})
ExpectApplied(ctx, env.Client, node)
ExpectMakeNodesInitialized(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKeyWithValue(v1alpha5.VoluntaryDisruptionAnnotationKey, v1alpha5.VoluntaryDisruptionDriftedAnnotationValue))
})
It("should not detect drift if the node isn't initialized", func() {
cp.Drifted = true
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: test.RandomName(),
},
},
})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKeyWithValue(v1alpha5.VoluntaryDisruptionAnnotationKey, v1alpha5.VoluntaryDisruptionDriftedAnnotationValue))
})
It("should annotate the node when it has drifted in the cloud provider", func() {
cp.Drifted = true
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: test.RandomName(),
},
},
ProviderID: test.RandomProviderID(),
})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectMakeNodesInitialized(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).To(HaveKeyWithValue(v1alpha5.VoluntaryDisruptionAnnotationKey, v1alpha5.VoluntaryDisruptionDriftedAnnotationValue))
})
It("should remove the annotation from nodes if drift is disabled", func() {
cp.Drifted = true
ctx = settings.ToContext(ctx, test.Settings(settings.Settings{DriftEnabled: false}))
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name},
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionDriftedAnnotationValue,
},
}})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectMakeNodesInitialized(ctx, env.Client, node)
// reconcile with drift disabled; the annotation should be removed
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKey(v1alpha5.VoluntaryDisruptionAnnotationKey))
})
})
Context("Emptiness", func() {
It("should not TTL nodes that are not initialized", func() {
provisioner.Spec.TTLSecondsAfterEmpty = ptr.Int64(30)
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}},
ReadyStatus: v1.ConditionFalse,
})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKey(v1alpha5.EmptinessTimestampAnnotationKey))
})
It("should label nodes as underutilized and add TTL", func() {
provisioner.Spec.TTLSecondsAfterEmpty = ptr.Int64(30)
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1alpha5.LabelNodeInitialized: "true",
},
}})
ExpectApplied(ctx, env.Client, provisioner, node)
// mark it empty first to get past the debounce check
fakeClock.Step(30 * time.Second)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
// make the node more than 5 minutes old
fakeClock.Step(320 * time.Second)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).To(HaveKey(v1alpha5.EmptinessTimestampAnnotationKey))
})
It("should return a requeue polling interval when the node is underutilized and nominated", func() {
provisioner.Spec.TTLSecondsAfterEmpty = ptr.Int64(30)
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1alpha5.LabelNodeInitialized: "true",
v1.LabelInstanceTypeStable: "default-instance-type", // need the instance type for the cluster state update
},
}})
ExpectApplied(ctx, env.Client, provisioner, node)
// Add the node to the cluster state and nominate it in the internal cluster state
Expect(cluster.UpdateNode(ctx, node)).To(Succeed())
cluster.NominateNodeForPod(ctx, node.Name)
result := ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
Expect(result.RequeueAfter).To(Equal(time.Second * 30))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKey(v1alpha5.EmptinessTimestampAnnotationKey))
})
It("should remove labels from non-empty nodes", func() {
provisioner.Spec.TTLSecondsAfterEmpty = ptr.Int64(30)
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1alpha5.LabelNodeInitialized: "true",
},
Annotations: map[string]string{
v1alpha5.EmptinessTimestampAnnotationKey: fakeClock.Now().Add(100 * time.Second).Format(time.RFC3339),
}},
})
ExpectApplied(ctx, env.Client, provisioner, node, test.Pod(test.PodOptions{
NodeName: node.Name,
Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
}))
// make the node more than 5 minutes old
fakeClock.Step(320 * time.Second)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKey(v1alpha5.EmptinessTimestampAnnotationKey))
})
})
Context("Expiration", func() {
It("should remove the annotation from nodes when expiration is disabled", func() {
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name},
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
},
}})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKey(v1alpha5.VoluntaryDisruptionAnnotationKey))
})
It("should annotate nodes as expired", func() {
provisioner.Spec.TTLSecondsUntilExpired = ptr.Int64(30)
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name},
}})
ExpectApplied(ctx, env.Client, provisioner, node)
// step forward to make the node expired
fakeClock.Step(60 * time.Second)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).To(HaveKeyWithValue(v1alpha5.VoluntaryDisruptionAnnotationKey, v1alpha5.VoluntaryDisruptionExpiredAnnotationValue))
})
It("should remove the annotation from non-expired nodes", func() {
provisioner.Spec.TTLSecondsUntilExpired = ptr.Int64(200)
node := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name},
Annotations: map[string]string{
v1alpha5.VoluntaryDisruptionAnnotationKey: v1alpha5.VoluntaryDisruptionExpiredAnnotationValue,
}},
})
ExpectApplied(ctx, env.Client, provisioner, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Annotations).ToNot(HaveKey(v1alpha5.VoluntaryDisruptionAnnotationKey))
})
})
Context("Filters", func() {
BeforeEach(func() {
innerCtx, cancel := context.WithCancel(ctx)
DeferCleanup(func() {
cancel()
})
mgr, err := controllerruntime.NewManager(env.Config, controllerruntime.Options{
Scheme: env.Scheme,
MetricsBindAddress: "0",
})
Expect(err).ToNot(HaveOccurred())
Expect(nodeController.Builder(innerCtx, mgr).Complete(nodeController)).To(Succeed())
go func() {
defer GinkgoRecover()
Expect(mgr.Start(innerCtx)).To(Succeed())
}()
})
It("should do nothing if the not owned by a provisioner", func() {
n := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{"fake.com/finalizer"},
}})
ExpectApplied(ctx, env.Client, provisioner, n)
// The controller shouldn't reconcile anything onto the node
Consistently(func(g Gomega) {
retrieved := &v1.Node{}
g.Expect(env.Client.Get(ctx, types.NamespacedName{Name: n.Name}, retrieved)).To(Succeed())
g.Expect(retrieved.Finalizers).To(Equal(n.Finalizers))
}).Should(Succeed())
})
It("should do nothing if deletion timestamp is set", func() {
n := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{"fake.com/finalizer"},
}})
ExpectApplied(ctx, env.Client, provisioner, n)
Expect(env.Client.Delete(ctx, n)).To(Succeed())
// Update the node to be provisioned by the provisioner through labels
n.Labels = map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
}
ExpectApplied(ctx, env.Client, n)
// The controller shouldn't reconcile anything onto the node
Consistently(func(g Gomega) {
retrieved := &v1.Node{}
g.Expect(env.Client.Get(ctx, types.NamespacedName{Name: n.Name}, retrieved)).To(Succeed())
g.Expect(retrieved.Finalizers).To(Equal(n.Finalizers))
}).Should(Succeed())
})
})
})
| 346 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioning
import (
"context"
"time"
"github.com/aws/karpenter-core/pkg/apis/settings"
)
// Batcher separates a stream of Trigger() calls into windowed slices. The
// window is dynamic and will be extended if additional items are added up to a
// maximum batch duration.
type Batcher struct {
trigger chan struct{}
}
// NewBatcher is a constructor for the Batcher
func NewBatcher() *Batcher {
return &Batcher{
trigger: make(chan struct{}, 1),
}
}
// Trigger causes the batcher to start a batching window, or extend the current batching window if it hasn't reached the
// maximum length.
func (b *Batcher) Trigger() {
// The trigger is idempotently armed. This statement never blocks
select {
case b.trigger <- struct{}{}:
default:
}
}
// Wait starts a batching window and continues waiting as long as it continues receiving triggers within
// the idleDuration, up to the maxDuration
func (b *Batcher) Wait(ctx context.Context) bool {
select {
case <-b.trigger:
// start the batching window after the first item is received
case <-time.After(1 * time.Second):
// If no pods, bail to the outer controller framework to refresh the context
return false
}
timeout := time.NewTimer(settings.FromContext(ctx).BatchMaxDuration.Duration)
idle := time.NewTimer(settings.FromContext(ctx).BatchIdleDuration.Duration)
for {
select {
case <-b.trigger:
// correct way to reset an active timer per docs
if !idle.Stop() {
<-idle.C
}
idle.Reset(settings.FromContext(ctx).BatchIdleDuration.Duration)
case <-timeout.C:
return true
case <-idle.C:
return true
}
}
}
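// Illustrative sketch (not part of the original source): how a singleton
// reconciler typically pairs Trigger with Wait. Producers call Trigger whenever
// work arrives; the consumer blocks in Wait until either the idle window or the
// maximum window closes. The provisionBatch callback is hypothetical.
func exampleBatcherUsage(ctx context.Context, b *Batcher, provisionBatch func(context.Context) error) error {
// A producer (e.g. a pod watch) arms the batcher; repeated calls are coalesced.
b.Trigger()
// Wait returns false if no trigger arrives within a second, letting the
// caller bounce back to its outer control loop and refresh its context.
if triggered := b.Wait(ctx); !triggered {
return nil
}
// The batching window has closed; act on everything collected during it.
return provisionBatch(ctx)
}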
| 75 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioning
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/events"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/utils/pod"
)
var _ corecontroller.TypedController[*v1.Pod] = (*Controller)(nil)
// Controller for the resource
type Controller struct {
kubeClient client.Client
provisioner *Provisioner
recorder events.Recorder
}
// NewController constructs a controller instance
func NewController(kubeClient client.Client, provisioner *Provisioner, recorder events.Recorder) corecontroller.Controller {
return corecontroller.Typed[*v1.Pod](kubeClient, &Controller{
kubeClient: kubeClient,
provisioner: provisioner,
recorder: recorder,
})
}
func (c *Controller) Name() string {
return "provisioner_trigger"
}
// Reconcile the resource
func (c *Controller) Reconcile(_ context.Context, p *v1.Pod) (reconcile.Result, error) {
if !pod.IsProvisionable(p) {
return reconcile.Result{}, nil
}
c.provisioner.Trigger()
// Continue to requeue until the pod is no longer provisionable. Pods may
// not be scheduled as expected if new pods are created while nodes are
// coming online. Even if a provisioning loop is successful, the pod may
// require another provisioning loop to become schedulable.
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}
func (c *Controller) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1.Pod{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}),
)
}
| 75 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioning
import (
"github.com/prometheus/client_golang/prometheus"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/aws/karpenter-core/pkg/metrics"
)
func init() {
crmetrics.Registry.MustRegister(schedulingDuration)
}
var schedulingDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: metrics.Namespace,
Subsystem: "provisioner",
Name: "scheduling_duration_seconds",
Help: "Duration of scheduling process in seconds. Broken down by provisioner and error.",
Buckets: metrics.DurationBuckets(),
},
)
| 37 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioning
import (
"context"
"fmt"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/samber/lo"
"go.uber.org/multierr"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/workqueue"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
schedulingevents "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling/events"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/utils/functional"
"github.com/aws/karpenter-core/pkg/utils/pretty"
"github.com/aws/karpenter-core/pkg/cloudprovider"
scheduler "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/utils/pod"
)
// LaunchOptions are the set of options that can be used to trigger certain
// actions and configuration during scheduling
type LaunchOptions struct {
RecordPodNomination bool
Reason string
}
// RecordPodNomination causes pod nomination events to be recorded against the node.
func RecordPodNomination(o LaunchOptions) LaunchOptions {
o.RecordPodNomination = true
return o
}
func WithReason(reason string) func(LaunchOptions) LaunchOptions {
return func(o LaunchOptions) LaunchOptions {
o.Reason = reason
return o
}
}
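// Illustrative sketch (not part of the original source): composing launch
// options the same way LaunchMachines and Launch do below. The reason string is
// arbitrary and only the helpers defined above are assumed.
func exampleLaunchOptions() string {
// Options are applied in order on top of the zero LaunchOptions value.
options := functional.ResolveOptions(WithReason("drift"), RecordPodNomination)
// options.Reason == "drift" and options.RecordPodNomination == true here.
return options.Reason
}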
// Provisioner waits for enqueued pods, batches them, creates capacity and binds the pods to the capacity.
type Provisioner struct {
cloudProvider cloudprovider.CloudProvider
kubeClient client.Client
coreV1Client corev1.CoreV1Interface
batcher *Batcher
volumeTopology *scheduler.VolumeTopology
cluster *state.Cluster
recorder events.Recorder
cm *pretty.ChangeMonitor
}
func NewProvisioner(kubeClient client.Client, coreV1Client corev1.CoreV1Interface,
recorder events.Recorder, cloudProvider cloudprovider.CloudProvider, cluster *state.Cluster) *Provisioner {
p := &Provisioner{
batcher: NewBatcher(),
cloudProvider: cloudProvider,
kubeClient: kubeClient,
coreV1Client: coreV1Client,
volumeTopology: scheduler.NewVolumeTopology(kubeClient),
cluster: cluster,
recorder: recorder,
cm: pretty.NewChangeMonitor(),
}
return p
}
func (p *Provisioner) Name() string {
return "provisioner"
}
func (p *Provisioner) Trigger() {
p.batcher.Trigger()
}
func (p *Provisioner) Builder(_ context.Context, mgr manager.Manager) controller.Builder {
return controller.NewSingletonManagedBy(mgr)
}
func (p *Provisioner) Reconcile(ctx context.Context, _ reconcile.Request) (result reconcile.Result, err error) {
// Batch pods
if triggered := p.batcher.Wait(ctx); !triggered {
return reconcile.Result{}, nil
}
// We need to ensure that our internal cluster state mechanism is synced before we proceed
// with making any scheduling decision off of our state nodes. Otherwise, we have the potential to make
// a scheduling decision based on a smaller subset of nodes in our cluster state than actually exist.
if !p.cluster.Synced(ctx) {
logging.FromContext(ctx).Debugf("waiting on cluster sync")
return reconcile.Result{RequeueAfter: time.Second}, nil
}
// Schedule pods to potential nodes, exit if nothing to do
results, err := p.Schedule(ctx)
if err != nil {
return reconcile.Result{}, err
}
if len(results.NewMachines) == 0 {
return reconcile.Result{}, nil
}
_, err = p.LaunchMachines(ctx, results.NewMachines, WithReason(metrics.ProvisioningReason), RecordPodNomination)
return reconcile.Result{}, err
}
// LaunchMachines launches the machines passed into the function in parallel. It returns a slice of the successfully
// created machine names as well as a multierr of any errors that occurred while launching machines
func (p *Provisioner) LaunchMachines(ctx context.Context, machines []*scheduler.Machine, opts ...functional.Option[LaunchOptions]) ([]string, error) {
// Launch capacity and bind pods
errs := make([]error, len(machines))
machineNames := make([]string, len(machines))
workqueue.ParallelizeUntil(ctx, len(machines), len(machines), func(i int) {
if machineName, err := p.Launch(ctx, machines[i], opts...); err != nil {
errs[i] = fmt.Errorf("launching machine, %w", err)
} else {
machineNames[i] = machineName
}
})
return machineNames, multierr.Combine(errs...)
}
func (p *Provisioner) GetPendingPods(ctx context.Context) ([]*v1.Pod, error) {
var podList v1.PodList
if err := p.kubeClient.List(ctx, &podList, client.MatchingFields{"spec.nodeName": ""}); err != nil {
return nil, fmt.Errorf("listing pods, %w", err)
}
var pods []*v1.Pod
for i := range podList.Items {
po := podList.Items[i]
// filter for provisionable pods first so we don't check for validity/PVCs on pods we won't provision anyway
// (e.g. those owned by daemonsets)
if !pod.IsProvisionable(&po) {
continue
}
if err := p.Validate(ctx, &po); err != nil {
logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(&po)).Debugf("ignoring pod, %s", err)
continue
}
p.consolidationWarnings(ctx, po)
pods = append(pods, &po)
}
return pods, nil
}
// consolidationWarnings potentially writes logs warning about possible unexpected interactions between scheduling
// constraints and consolidation
func (p *Provisioner) consolidationWarnings(ctx context.Context, po v1.Pod) {
// We have pending pods that have preferred anti-affinity or topology spread constraints. These can interact
// unexpectedly with consolidation so we warn once per hour when we see these pods.
if po.Spec.Affinity != nil && po.Spec.Affinity.PodAntiAffinity != nil {
if len(po.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 0 {
if p.cm.HasChanged(string(po.UID), "pod-antiaffinity") {
logging.FromContext(ctx).Infof("pod %s has a preferred Anti-Affinity which can prevent consolidation", client.ObjectKeyFromObject(&po))
}
}
}
for _, tsc := range po.Spec.TopologySpreadConstraints {
if tsc.WhenUnsatisfiable == v1.ScheduleAnyway {
if p.cm.HasChanged(string(po.UID), "pod-topology-spread") {
logging.FromContext(ctx).Infof("pod %s has a preferred TopologySpreadConstraint which can prevent consolidation", client.ObjectKeyFromObject(&po))
}
}
}
}
//nolint:gocyclo
func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNodes []*state.StateNode, opts scheduler.SchedulerOptions) (*scheduler.Scheduler, error) {
// Build node templates
var machines []*scheduler.MachineTemplate
var provisionerList v1alpha5.ProvisionerList
instanceTypes := map[string][]*cloudprovider.InstanceType{}
domains := map[string]sets.String{}
if err := p.kubeClient.List(ctx, &provisionerList); err != nil {
return nil, fmt.Errorf("listing provisioners, %w", err)
}
// nodeTemplates generated from provisioners are ordered by weight
// since they are stored within a slice and scheduling
// will always attempt to schedule on the first nodeTemplate
provisionerList.OrderByWeight()
for i := range provisionerList.Items {
provisioner := &provisionerList.Items[i]
if !provisioner.DeletionTimestamp.IsZero() {
continue
}
// Create node template
machines = append(machines, scheduler.NewMachineTemplate(provisioner))
// Get instance type options
instanceTypeOptions, err := p.cloudProvider.GetInstanceTypes(ctx, provisioner)
if err != nil {
return nil, fmt.Errorf("getting instance types, %w", err)
}
instanceTypes[provisioner.Name] = append(instanceTypes[provisioner.Name], instanceTypeOptions...)
// Construct Topology Domains
for _, instanceType := range instanceTypeOptions {
// We need to intersect the instance type requirements with the current provisioner requirements. This
// ensures that something like zones from an instance type don't expand the universe of valid domains.
requirements := scheduling.NewNodeSelectorRequirements(provisioner.Spec.Requirements...)
requirements.Add(instanceType.Requirements.Values()...)
for key, requirement := range requirements {
// This code used to execute a Union between domains[key] and requirement.Values().
// The downside of this is that Union is immutable and takes a copy of the set it is executed upon.
// This resulted in a lot of memory pressure on the heap and poor performance.
// https://github.com/aws/karpenter/issues/3565
if domains[key] == nil {
domains[key] = sets.NewString(requirement.Values()...)
} else {
domains[key].Insert(requirement.Values()...)
}
}
}
for key, requirement := range scheduling.NewNodeSelectorRequirements(provisioner.Spec.Requirements...) {
if requirement.Operator() == v1.NodeSelectorOpIn {
// The following is a performance optimisation; for the explanation see the comment above
if domains[key] == nil {
domains[key] = sets.NewString(requirement.Values()...)
} else {
domains[key].Insert(requirement.Values()...)
}
}
}
}
if len(machines) == 0 {
return nil, fmt.Errorf("no provisioners found")
}
// inject topology constraints
pods = p.injectTopology(ctx, pods)
// Calculate cluster topology
topology, err := scheduler.NewTopology(ctx, p.kubeClient, p.cluster, domains, pods)
if err != nil {
return nil, fmt.Errorf("tracking topology counts, %w", err)
}
daemonSetPods, err := p.getDaemonSetPods(ctx)
if err != nil {
return nil, fmt.Errorf("getting daemon pods, %w", err)
}
return scheduler.NewScheduler(ctx, p.kubeClient, machines, provisionerList.Items, p.cluster, stateNodes, topology, instanceTypes, daemonSetPods, p.recorder, opts), nil
}
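// Illustrative sketch (not part of the original source) of the difference the
// comment above describes: Union allocates a fresh set and copies both operands
// into it, while Insert mutates the existing set in place, which matters when
// this runs for every requirement of every instance type of every provisioner.
func exampleDomainAccumulation(domains map[string]sets.String, key string, values []string) {
// Allocation-heavy form this code used to use:
//   domains[key] = domains[key].Union(sets.NewString(values...))
// In-place form used in NewScheduler above:
if domains[key] == nil {
domains[key] = sets.NewString(values...)
} else {
domains[key].Insert(values...)
}
}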
func (p *Provisioner) Schedule(ctx context.Context) (*scheduler.Results, error) {
defer metrics.Measure(schedulingDuration)()
// We collect the nodes with their used capacities before we get the list of pending pods. This ensures that
// the node capacities we schedule against are always >= what the actual capacity is at any given instance. This
// prevents over-provisioning at the cost of potentially under-provisioning which will self-heal during the next
// scheduling loop when we Launch a new node. When this order is reversed, our node capacity may be reduced by pods
// that have bound which we then provision new un-needed capacity for.
// -------
// We don't consider the nodes that are MarkedForDeletion since this capacity shouldn't be considered
// as persistent capacity for the cluster (since it will soon be removed). Additionally, we are scheduling for
// the pods that are on these nodes so the MarkedForDeletion node capacity can't be considered.
nodes := p.cluster.Nodes()
// Get pods, exit if nothing to do
pendingPods, err := p.GetPendingPods(ctx)
if err != nil {
return nil, err
}
// Get pods from nodes that are preparing for deletion
// We do this after getting the pending pods so that we undershoot if pods are
// actively migrating from a node that is being deleted
// NOTE: The assumption is that these nodes are cordoned and no additional pods will schedule to them
deletingNodePods, err := nodes.Deleting().Pods(ctx, p.kubeClient)
if err != nil {
return nil, err
}
pods := append(pendingPods, deletingNodePods...)
// nothing to schedule, so just return success
if len(pods) == 0 {
return &scheduler.Results{}, nil
}
scheduler, err := p.NewScheduler(ctx, pods, nodes.Active(), scheduler.SchedulerOptions{})
if err != nil {
return nil, fmt.Errorf("creating scheduler, %w", err)
}
return scheduler.Solve(ctx, pods)
}
func (p *Provisioner) Launch(ctx context.Context, m *scheduler.Machine, opts ...functional.Option[LaunchOptions]) (string, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provisioner", m.Labels[v1alpha5.ProvisionerNameLabelKey]))
// Check limits
latest := &v1alpha5.Provisioner{}
if err := p.kubeClient.Get(ctx, types.NamespacedName{Name: m.ProvisionerName}, latest); err != nil {
return "", fmt.Errorf("getting current resource usage, %w", err)
}
if err := latest.Spec.Limits.ExceededBy(latest.Status.Resources); err != nil {
return "", err
}
options := functional.ResolveOptions(opts...)
machine := m.ToMachine(latest)
if err := p.kubeClient.Create(ctx, machine); err != nil {
return "", err
}
instanceTypeRequirement, _ := lo.Find(machine.Spec.Requirements, func(req v1.NodeSelectorRequirement) bool { return req.Key == v1.LabelInstanceTypeStable })
logging.FromContext(ctx).With("requests", machine.Spec.Resources.Requests, "instance-types", instanceTypeList(instanceTypeRequirement.Values)).Infof("created machine")
p.cluster.NominateNodeForPod(ctx, machine.Name)
metrics.MachinesCreatedCounter.With(prometheus.Labels{
metrics.ReasonLabel: options.Reason,
metrics.ProvisionerLabel: machine.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
if functional.ResolveOptions(opts...).RecordPodNomination {
for _, pod := range m.Pods {
p.recorder.Publish(schedulingevents.NominatePod(pod, nil, machine))
}
}
return machine.Name, nil
}
func instanceTypeList(names []string) string {
var itSb strings.Builder
for i, name := range names {
// print the first 5 instance types only (indices 0-4)
if i > 4 {
lo.Must(fmt.Fprintf(&itSb, " and %d other(s)", len(names)-i))
break
} else if i > 0 {
lo.Must(fmt.Fprint(&itSb, ", "))
}
lo.Must(fmt.Fprint(&itSb, name))
}
return itSb.String()
}
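// Illustrative sketch (not part of the original source): the helper above prints
// at most five names before summarizing the remainder.
func exampleInstanceTypeList() string {
// Returns "a, b, c, d, e and 2 other(s)".
return instanceTypeList([]string{"a", "b", "c", "d", "e", "f", "g"})
}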
func (p *Provisioner) getDaemonSetPods(ctx context.Context) ([]*v1.Pod, error) {
daemonSetList := &appsv1.DaemonSetList{}
if err := p.kubeClient.List(ctx, daemonSetList); err != nil {
return nil, fmt.Errorf("listing daemonsets, %w", err)
}
return lo.Map(daemonSetList.Items, func(d appsv1.DaemonSet, _ int) *v1.Pod {
pod := p.cluster.GetDaemonSetPod(&d)
if pod == nil {
pod = &v1.Pod{Spec: d.Spec.Template.Spec}
}
return pod
}), nil
}
func (p *Provisioner) Validate(ctx context.Context, pod *v1.Pod) error {
return multierr.Combine(
validateProvisionerNameCanExist(pod),
validateAffinity(pod),
p.volumeTopology.ValidatePersistentVolumeClaims(ctx, pod),
)
}
// validateProvisionerNameCanExist provides a clearer error message in the event of scheduling a pod that specifically doesn't
// want to run on a Karpenter node (e.g. a Karpenter controller replica).
func validateProvisionerNameCanExist(p *v1.Pod) error {
for _, req := range scheduling.NewPodRequirements(p) {
if req.Key == v1alpha5.ProvisionerNameLabelKey && req.Operator() == v1.NodeSelectorOpDoesNotExist {
return fmt.Errorf("configured to not run on a Karpenter provisioned node via %s %s requirement",
v1alpha5.ProvisionerNameLabelKey, v1.NodeSelectorOpDoesNotExist)
}
}
return nil
}
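// Illustrative sketch (not part of the original source): the kind of pod the
// check above is aimed at, one that opts out of Karpenter-provisioned nodes via
// a required DoesNotExist term on the provisioner-name label (a common pattern
// for Karpenter's own controller pods). Only fields relevant to the check are set.
func examplePodThatOptsOutOfKarpenter() *v1.Pod {
return &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{
MatchExpressions: []v1.NodeSelectorRequirement{{
Key:      v1alpha5.ProvisionerNameLabelKey,
Operator: v1.NodeSelectorOpDoesNotExist,
}},
}},
},
},
},
},
}
}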
func (p *Provisioner) injectTopology(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
var schedulablePods []*v1.Pod
for _, pod := range pods {
if err := p.volumeTopology.Inject(ctx, pod); err != nil {
logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(pod)).Errorf("getting volume topology requirements, %s", err)
} else {
schedulablePods = append(schedulablePods, pod)
}
}
return schedulablePods
}
func validateAffinity(p *v1.Pod) (errs error) {
if p.Spec.Affinity == nil {
return nil
}
if p.Spec.Affinity.NodeAffinity != nil {
for _, term := range p.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
errs = multierr.Append(errs, validateNodeSelectorTerm(term.Preference))
}
if p.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
for _, term := range p.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
errs = multierr.Append(errs, validateNodeSelectorTerm(term))
}
}
}
return errs
}
func validateNodeSelectorTerm(term v1.NodeSelectorTerm) (errs error) {
if term.MatchFields != nil {
errs = multierr.Append(errs, fmt.Errorf("node selector term with matchFields is not supported"))
}
if term.MatchExpressions != nil {
for _, requirement := range term.MatchExpressions {
errs = multierr.Append(errs, v1alpha5.ValidateRequirement(requirement))
}
}
return errs
}
| 440 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package provisioning_test
import (
"context"
"encoding/json"
"testing"
"time"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
clock "k8s.io/utils/clock/testing"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/controllers/state/informer"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
"github.com/aws/karpenter-core/pkg/utils/sets"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var fakeClock *clock.FakeClock
var cluster *state.Cluster
var nodeController controller.Controller
var cloudProvider *fake.CloudProvider
var prov *provisioning.Provisioner
var daemonsetController controller.Controller
var env *test.Environment
var instanceTypeMap map[string]*cloudprovider.InstanceType
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controllers/Provisioning")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
nodeController = informer.NewNodeController(env.Client, cluster)
prov = provisioning.NewProvisioner(env.Client, corev1.NewForConfigOrDie(env.Config), events.NewRecorder(&record.FakeRecorder{}), cloudProvider, cluster)
daemonsetController = informer.NewDaemonSetController(env.Client, cluster)
instanceTypes, _ := cloudProvider.GetInstanceTypes(context.Background(), nil)
instanceTypeMap = map[string]*cloudprovider.InstanceType{}
for _, it := range instanceTypes {
instanceTypeMap[it.Name] = it
}
})
var _ = BeforeEach(func() {
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider.Reset()
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
cluster.Reset()
})
var _ = Describe("Provisioning", func() {
It("should provision nodes", func() {
ExpectApplied(ctx, env.Client, test.Provisioner())
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
nodes := &v1.NodeList{}
Expect(env.Client.List(ctx, nodes)).To(Succeed())
Expect(len(nodes.Items)).To(Equal(1))
ExpectScheduled(ctx, env.Client, pod)
})
It("should ignore provisioners that are deleting", func() {
provisioner := test.Provisioner()
ExpectApplied(ctx, env.Client, provisioner)
ExpectDeletionTimestampSet(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
nodes := &v1.NodeList{}
Expect(env.Client.List(ctx, nodes)).To(Succeed())
Expect(len(nodes.Items)).To(Equal(0))
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should provision nodes for pods with supported node selectors", func() {
provisioner := test.Provisioner()
schedulable := []*v1.Pod{
// Constrained by provisioner
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}}),
// Constrained by zone
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}}),
// Constrained by instanceType
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "default-instance-type"}}),
// Constrained by architecture
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "arm64"}}),
// Constrained by operatingSystem
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Linux)}}),
}
unschedulable := []*v1.Pod{
// Ignored, matches another provisioner
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: "unknown"}}),
// Ignored, invalid zone
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "unknown"}}),
// Ignored, invalid instance type
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "unknown"}}),
// Ignored, invalid architecture
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "unknown"}}),
// Ignored, invalid operating system
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelOSStable: "unknown"}}),
// Ignored, invalid capacity type
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.LabelCapacityType: "unknown"}}),
// Ignored, label selector does not match
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{"foo": "bar"}}),
}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, schedulable...)
for _, pod := range schedulable {
ExpectScheduled(ctx, env.Client, pod)
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, unschedulable...)
for _, pod := range unschedulable {
ExpectNotScheduled(ctx, env.Client, pod)
}
})
It("should provision nodes for accelerators", func() {
ExpectApplied(ctx, env.Client, test.Provisioner())
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Limits: v1.ResourceList{fake.ResourceGPUVendorA: resource.MustParse("1")}},
}),
test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Limits: v1.ResourceList{fake.ResourceGPUVendorB: resource.MustParse("1")}},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
ExpectScheduled(ctx, env.Client, pod)
}
})
It("should provision multiple nodes when maxPods is set", func() {
// Kubelet configuration is not actually observed here; the scheduler relies on the
// pods resource value, which is statically set in the fake cloudprovider
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Kubelet: &v1alpha5.KubeletConfiguration{MaxPods: ptr.Int32(1)},
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"single-pod-instance-type"},
},
},
}))
pods := []*v1.Pod{
test.UnschedulablePod(), test.UnschedulablePod(), test.UnschedulablePod(),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodes := &v1.NodeList{}
Expect(env.Client.List(ctx, nodes)).To(Succeed())
Expect(len(nodes.Items)).To(Equal(3))
for _, pod := range pods {
ExpectScheduled(ctx, env.Client, pod)
}
})
It("should schedule all pods on one node when node is in deleting state", func() {
provisioner := test.Provisioner()
its, err := cloudProvider.GetInstanceTypes(ctx, provisioner)
Expect(err).To(BeNil())
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: its[0].Name,
},
Finalizers: []string{v1alpha5.TerminationFinalizer},
}},
)
ExpectApplied(ctx, env.Client, node, provisioner)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
// Schedule 3 pods to the node that currently exists
for i := 0; i < 3; i++ {
pod := test.UnschedulablePod()
ExpectApplied(ctx, env.Client, pod)
ExpectManualBinding(ctx, env.Client, pod, node)
}
// Node shouldn't fully delete since it has a finalizer
Expect(env.Client.Delete(ctx, node)).To(Succeed())
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
// Provision without a binding since some pods will already be bound
// Should all schedule to the new node, ignoring the old node
bindings := ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, test.UnschedulablePod(), test.UnschedulablePod())
nodes := &v1.NodeList{}
Expect(env.Client.List(ctx, nodes)).To(Succeed())
Expect(len(nodes.Items)).To(Equal(2))
// Scheduler should attempt to schedule all the pods to the new node
for _, n := range bindings {
Expect(n.Node.Name).ToNot(Equal(node.Name))
}
})
Context("Resource Limits", func() {
It("should not schedule when limits are exceeded", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("20")},
Status: v1alpha5.ProvisionerStatus{
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
},
},
}))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule if limits would be met", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}))
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
// requires a 2 CPU node, but leaves room for overhead
v1.ResourceCPU: resource.MustParse("1.75"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
// A 2 CPU node can be launched
ExpectScheduled(ctx, env.Client, pod)
})
It("should partially schedule if limits would be exceeded", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("3")},
}))
// prevent these pods from scheduling on the same node
opts := test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "foo"},
},
PodAntiRequirements: []v1.PodAffinityTerm{
{
TopologyKey: v1.LabelHostname,
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "foo",
},
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1.5"),
}}}
pods := []*v1.Pod{
test.UnschedulablePod(opts),
test.UnschedulablePod(opts),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
scheduledPodCount := 0
unscheduledPodCount := 0
pod0 := ExpectPodExists(ctx, env.Client, pods[0].Name, pods[0].Namespace)
pod1 := ExpectPodExists(ctx, env.Client, pods[1].Name, pods[1].Namespace)
if pod0.Spec.NodeName == "" {
unscheduledPodCount++
} else {
scheduledPodCount++
}
if pod1.Spec.NodeName == "" {
unscheduledPodCount++
} else {
scheduledPodCount++
}
Expect(scheduledPodCount).To(Equal(1))
Expect(unscheduledPodCount).To(Equal(1))
})
It("should not schedule if limits would be exceeded", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}))
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2.1"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule if limits would be exceeded (GPU)", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{v1.ResourcePods: resource.MustParse("1")},
}))
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
// only available instance type has 2 GPUs which would exceed the limit
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule to a provisioner after a scheduling round if limits would be exceeded", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}))
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
// requires a 2 CPU node, but leaves room for overhead
v1.ResourceCPU: resource.MustParse("1.75"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
// A 2 CPU node can be launched
ExpectScheduled(ctx, env.Client, pod)
// This pod requests over the existing limit (would add to 3.5 CPUs) so this should fail
pod = test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
// requires a 2 CPU node, but leaves room for overhead
v1.ResourceCPU: resource.MustParse("1.75"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
})
Context("Daemonsets and Node Overhead", func() {
It("should account for overhead", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should account for overhead (with startup taint)", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{
StartupTaints: []v1.Taint{{Key: "foo.com/taint", Effect: v1.TaintEffectNoSchedule}},
})
ExpectApplied(ctx, env.Client, provisioner, test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should not schedule if overhead is too large", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")}},
}},
))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should account for overhead using daemonset pod spec instead of daemonset spec", func() {
provisioner := test.Provisioner()
// Create a daemonset with large resource requests
daemonset := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4"), v1.ResourceMemory: resource.MustParse("4Gi")}},
}},
)
ExpectApplied(ctx, env.Client, provisioner, daemonset)
// Create the actual daemonset pod with lower resource requests and expect the pod's requests to be used for overhead
daemonsetPod := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: daemonset.Name,
UID: daemonset.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
})
ExpectApplied(ctx, env.Client, provisioner, daemonsetPod)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
// We expect a smaller instance since the daemonset pod's requests are smaller than the daemonset spec's
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should not schedule if resource requests are not defined and limits (requests) are too large", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}},
))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule based on the max resource requests of containers and initContainers", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("2Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}},
))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
It("should not schedule if combined max resources are too large for any node", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("1Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}},
))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule if initContainer resources are too large", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10000"), v1.ResourceMemory: resource.MustParse("10000Gi")},
},
}},
))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should be able to schedule pods if resource requests and limits are not defined", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{}},
))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should ignore daemonsets without matching tolerations", func() {
ExpectApplied(ctx, env.Client,
test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectNoSchedule}}}),
test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
Tolerations: []v1.Toleration{{Operator: v1.TolerationOperator(v1.NodeSelectorOpExists)}},
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("2")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("2Gi")))
})
It("should ignore daemonsets with an invalid selector", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
NodeSelector: map[string]string{"node": "invalid"},
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("2")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("2Gi")))
})
It("should account daemonsets with NotIn operator and unspecified key", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{{Key: "foo", Operator: v1.NodeSelectorOpNotIn, Values: []string{"bar"}}},
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}},
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
allocatable := instanceTypeMap[node.Labels[v1.LabelInstanceTypeStable]].Capacity
Expect(*allocatable.Cpu()).To(Equal(resource.MustParse("4")))
Expect(*allocatable.Memory()).To(Equal(resource.MustParse("4Gi")))
})
})
Context("Annotations", func() {
It("should annotate nodes", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{
Annotations: map[string]string{v1alpha5.DoNotConsolidateNodeAnnotationKey: "true"},
})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Annotations).To(HaveKeyWithValue(v1alpha5.DoNotConsolidateNodeAnnotationKey, "true"))
})
})
Context("Labels", func() {
It("should label nodes", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{
Labels: map[string]string{"test-key-1": "test-value-1"},
Requirements: []v1.NodeSelectorRequirement{
{Key: "test-key-2", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value-2"}},
{Key: "test-key-3", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value-3"}},
{Key: "test-key-4", Operator: v1.NodeSelectorOpLt, Values: []string{"4"}},
{Key: "test-key-5", Operator: v1.NodeSelectorOpGt, Values: []string{"5"}},
{Key: "test-key-6", Operator: v1.NodeSelectorOpExists},
{Key: "test-key-7", Operator: v1.NodeSelectorOpDoesNotExist},
},
})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1alpha5.ProvisionerNameLabelKey, provisioner.Name))
Expect(node.Labels).To(HaveKeyWithValue("test-key-1", "test-value-1"))
Expect(node.Labels).To(HaveKeyWithValue("test-key-2", "test-value-2"))
Expect(node.Labels).To(And(HaveKey("test-key-3"), Not(HaveValue(Equal("test-value-3")))))
Expect(node.Labels).To(And(HaveKey("test-key-4"), Not(HaveValue(Equal("test-value-4")))))
Expect(node.Labels).To(And(HaveKey("test-key-5"), Not(HaveValue(Equal("test-value-5")))))
Expect(node.Labels).To(HaveKey("test-key-6"))
Expect(node.Labels).ToNot(HaveKey("test-key-7"))
})
It("should label nodes with labels in the LabelDomainExceptions list", func() {
for domain := range v1alpha5.LabelDomainExceptions {
provisioner := test.Provisioner(test.ProvisionerOptions{Labels: map[string]string{domain + "/test": "test-value"}})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{{Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value"))
}
})
})
Context("Taints", func() {
It("should schedule pods that tolerate taints", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "nvidia.com/gpu", Value: "true", Effect: v1.TaintEffectNoSchedule}}})
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(
test.PodOptions{Tolerations: []v1.Toleration{
{
Key: "nvidia.com/gpu",
Operator: v1.TolerationOpEqual,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
}}),
test.UnschedulablePod(
test.PodOptions{Tolerations: []v1.Toleration{
{
Key: "nvidia.com/gpu",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
}}),
test.UnschedulablePod(
test.PodOptions{Tolerations: []v1.Toleration{
{
Key: "nvidia.com/gpu",
Operator: v1.TolerationOpExists,
},
}}),
test.UnschedulablePod(
test.PodOptions{Tolerations: []v1.Toleration{
{
Operator: v1.TolerationOpExists,
},
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
ExpectScheduled(ctx, env.Client, pod)
}
})
})
Context("Machine Creation", func() {
It("should create a machine request with expected requirements", func() {
provisioner := test.Provisioner()
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
ExpectMachineRequirements(cloudProvider.CreateCalls[0],
v1.NodeSelectorRequirement{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: lo.Keys(instanceTypeMap),
},
v1.NodeSelectorRequirement{
Key: v1alpha5.ProvisionerNameLabelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{provisioner.Name},
},
)
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request with additional expected requirements", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{
{
Key: "custom-requirement-key",
Operator: v1.NodeSelectorOpIn,
Values: []string{"value"},
},
{
Key: "custom-requirement-key2",
Operator: v1.NodeSelectorOpIn,
Values: []string{"value"},
},
},
})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
ExpectMachineRequirements(cloudProvider.CreateCalls[0],
v1.NodeSelectorRequirement{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: lo.Keys(instanceTypeMap),
},
v1.NodeSelectorRequirement{
Key: v1alpha5.ProvisionerNameLabelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{provisioner.Name},
},
v1.NodeSelectorRequirement{
Key: "custom-requirement-key",
Operator: v1.NodeSelectorOpIn,
Values: []string{"value"},
},
v1.NodeSelectorRequirement{
Key: "custom-requirement-key2",
Operator: v1.NodeSelectorOpIn,
Values: []string{"value"},
},
)
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request restricting instance types on architecture", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"arm64"},
},
},
})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
// Expect a more restricted set of instance types
ExpectMachineRequirements(cloudProvider.CreateCalls[0],
v1.NodeSelectorRequirement{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"arm64"},
},
v1.NodeSelectorRequirement{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"arm-instance-type"},
},
)
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request restricting instance types on operating system", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"ios"},
},
},
})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
// Expect a more restricted set of instance types
ExpectMachineRequirements(cloudProvider.CreateCalls[0],
v1.NodeSelectorRequirement{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"ios"},
},
v1.NodeSelectorRequirement{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"arm-instance-type"},
},
)
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request restricting instance types based on pod resource requests", func() {
provisioner := test.Provisioner()
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
Limits: v1.ResourceList{
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
// Expect a more restricted set of instance types
ExpectMachineRequirements(cloudProvider.CreateCalls[0],
v1.NodeSelectorRequirement{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"gpu-vendor-instance-type"},
},
)
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request with the correct owner reference", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{})
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
Expect(cloudProvider.CreateCalls[0].OwnerReferences).To(ContainElement(
metav1.OwnerReference{
APIVersion: "karpenter.sh/v1alpha5",
Kind: "Provisioner",
Name: provisioner.Name,
UID: provisioner.UID,
BlockOwnerDeletion: lo.ToPtr(true),
},
))
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request propagating the provider reference", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
ProviderRef: &v1alpha5.MachineTemplateRef{
APIVersion: "cloudprovider.karpenter.sh/v1alpha1",
Kind: "CloudProvider",
Name: "default",
},
}))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
Expect(cloudProvider.CreateCalls[0].Spec.MachineTemplateRef).To(Equal(
&v1alpha5.MachineTemplateRef{
APIVersion: "cloudprovider.karpenter.sh/v1alpha1",
Kind: "CloudProvider",
Name: "default",
},
))
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine request with the karpenter.sh/compatibility/provider annotation", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Provider: map[string]string{
"providerField1": "value",
"providerField2": "value",
},
}))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
Expect(cloudProvider.CreateCalls[0].Annotations).To(HaveKey(v1alpha5.ProviderCompatabilityAnnotationKey))
// Deserialize the provider into the expected format
provider := map[string]string{}
Expect(json.Unmarshal([]byte(cloudProvider.CreateCalls[0].Annotations[v1alpha5.ProviderCompatabilityAnnotationKey]), &provider)).To(Succeed())
Expect(provider).To(HaveKeyWithValue("providerField1", "value"))
Expect(provider).To(HaveKeyWithValue("providerField2", "value"))
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine with resource requests", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{
Provider: map[string]string{
"providerField1": "value",
"providerField2": "value",
},
}))
pod := test.UnschedulablePod(
test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Mi"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
Limits: v1.ResourceList{
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
Expect(cloudProvider.CreateCalls[0].Spec.Resources.Requests).To(HaveLen(4))
ExpectMachineRequests(cloudProvider.CreateCalls[0], v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Mi"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
v1.ResourcePods: resource.MustParse("1"),
})
ExpectScheduled(ctx, env.Client, pod)
})
It("should create a machine with resource requests with daemon overhead", func() {
ExpectApplied(ctx, env.Client, test.Provisioner(), test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Mi")}},
}},
))
pod := test.UnschedulablePod(
test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Mi")}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(1))
ExpectMachineRequests(cloudProvider.CreateCalls[0], v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Mi"),
v1.ResourcePods: resource.MustParse("2"),
})
ExpectScheduled(ctx, env.Client, pod)
})
})
})
var _ = Describe("Volume Topology Requirements", func() {
var storageClass *storagev1.StorageClass
BeforeEach(func() {
storageClass = test.StorageClass(test.StorageClassOptions{Zones: []string{"test-zone-2", "test-zone-3"}})
})
It("should not schedule if invalid pvc", func() {
ExpectApplied(ctx, env.Client, test.Provisioner())
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{"invalid"},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule with an empty storage class", func() {
storageClass := ""
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &storageClass})
ExpectApplied(ctx, env.Client, test.Provisioner(), persistentVolumeClaim)
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule valid pods when a pod with an invalid pvc is encountered (pvc)", func() {
ExpectApplied(ctx, env.Client, test.Provisioner())
invalidPod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{"invalid"},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, invalidPod)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, invalidPod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule valid pods when a pod with an invalid pvc is encountered (storage class)", func() {
invalidStorageClass := "invalid-storage-class"
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &invalidStorageClass})
ExpectApplied(ctx, env.Client, test.Provisioner(), persistentVolumeClaim)
invalidPod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, invalidPod)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, invalidPod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule valid pods when a pod with an invalid pvc is encountered (volume name)", func() {
invalidVolumeName := "invalid-volume-name"
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: invalidVolumeName})
ExpectApplied(ctx, env.Client, test.Provisioner(), persistentVolumeClaim)
invalidPod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, invalidPod)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, invalidPod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule to storage class zones if volume does not exist", func() {
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim)
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-3"},
}},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should not schedule if storage class zones are incompatible", func() {
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim)
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"},
}},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule to volume zones if volume already bound", func() {
persistentVolume := test.PersistentVolume(test.PersistentVolumeOptions{Zones: []string{"test-zone-3"}})
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: persistentVolume.Name, StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim, persistentVolume)
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should not schedule if volume zones are incompatible", func() {
persistentVolume := test.PersistentVolume(test.PersistentVolumeOptions{Zones: []string{"test-zone-3"}})
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: persistentVolume.Name, StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim, persistentVolume)
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"},
}},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not relax an added volume topology zone node-selector away", func() {
persistentVolume := test.PersistentVolume(test.PersistentVolumeOptions{Zones: []string{"test-zone-3"}})
persistentVolumeClaim := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{VolumeName: persistentVolume.Name, StorageClassName: &storageClass.Name})
ExpectApplied(ctx, env.Client, test.Provisioner(), storageClass, persistentVolumeClaim, persistentVolume)
pod := test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{persistentVolumeClaim.Name},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: "example.com/label",
Operator: v1.NodeSelectorOpIn,
Values: []string{"unsupported"},
},
},
})
// Add a second node selector term (on capacity type) that is OR'd with the first. Previously we only added the
// volume topology requirement to a single node selector term, which would sometimes get relaxed away. Now we add
// it to every term so that it is AND'd with each existing term.
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeOnDemand},
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
})
var _ = Describe("Preferential Fallback", func() {
Context("Required", func() {
It("should not relax the final term", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}}, // Should not be relaxed
}},
}}}}
// Don't relax
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{Requirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}}))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should relax multiple terms", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}},
}},
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}, // OR operator, never get to this one
}},
}}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner())
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1"))
})
})
Context("Preferences", func() {
It("should relax all node affinity terms", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
},
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
},
}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner())
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should relax to use lighter weights", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 100, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}},
}},
},
{
Weight: 50, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}},
}},
},
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{ // OR operator, never get to this one
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}},
}},
},
}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{Requirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}}))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should tolerate PreferNoSchedule taint only after trying to relax Affinity terms", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
},
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
},
}}}
// Success
ExpectApplied(ctx, env.Client, test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectPreferNoSchedule}}}))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Spec.Taints).To(ContainElement(v1.Taint{Key: "foo", Value: "bar", Effect: v1.TaintEffectPreferNoSchedule}))
})
})
})
var _ = Describe("Multiple Provisioners", func() {
It("should schedule to an explicitly selected provisioner", func() {
provisioner := test.Provisioner()
ExpectApplied(ctx, env.Client, provisioner, test.Provisioner())
pod := test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(provisioner.Name))
})
It("should schedule to a provisioner by labels", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{Labels: map[string]string{"foo": "bar"}})
ExpectApplied(ctx, env.Client, provisioner, test.Provisioner())
pod := test.UnschedulablePod(test.PodOptions{NodeSelector: provisioner.Spec.Labels})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(provisioner.Name))
})
It("should not match provisioner with PreferNoSchedule taint when other provisioner match", func() {
provisioner := test.Provisioner(test.ProvisionerOptions{Taints: []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectPreferNoSchedule}}})
ExpectApplied(ctx, env.Client, provisioner, test.Provisioner())
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).ToNot(Equal(provisioner.Name))
})
Context("Weighted Provisioners", func() {
It("should schedule to the provisioner with the highest priority always", func() {
provisioners := []client.Object{
test.Provisioner(),
test.Provisioner(test.ProvisionerOptions{Weight: ptr.Int32(20)}),
test.Provisioner(test.ProvisionerOptions{Weight: ptr.Int32(100)}),
}
ExpectApplied(ctx, env.Client, provisioners...)
pods := []*v1.Pod{
test.UnschedulablePod(), test.UnschedulablePod(), test.UnschedulablePod(),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(provisioners[2].GetName()))
}
})
It("should schedule to explicitly selected provisioner even if other provisioners are higher priority", func() {
targetedProvisioner := test.Provisioner()
provisioners := []client.Object{
targetedProvisioner,
test.Provisioner(test.ProvisionerOptions{Weight: ptr.Int32(20)}),
test.Provisioner(test.ProvisionerOptions{Weight: ptr.Int32(100)}),
}
ExpectApplied(ctx, env.Client, provisioners...)
pod := test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.ProvisionerNameLabelKey: targetedProvisioner.Name}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1alpha5.ProvisionerNameLabelKey]).To(Equal(targetedProvisioner.Name))
})
})
})
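// ExpectMachineRequirements asserts that, for each expected requirement, the machine spec contains a requirement
// with the same key and operator whose set of values exactly matches the expected values.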
func ExpectMachineRequirements(machine *v1alpha5.Machine, requirements ...v1.NodeSelectorRequirement) {
for _, requirement := range requirements {
req, ok := lo.Find(machine.Spec.Requirements, func(r v1.NodeSelectorRequirement) bool {
return r.Key == requirement.Key && r.Operator == requirement.Operator
})
ExpectWithOffset(1, ok).To(BeTrue())
have := sets.New(req.Values...)
expected := sets.New(requirement.Values...)
ExpectWithOffset(1, have.Len()).To(Equal(expected.Len()))
ExpectWithOffset(1, have.Intersection(expected).Len()).To(Equal(expected.Len()))
}
}
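// ExpectMachineRequests asserts that the machine's resource requests approximately match the given resource list
// (within the absolute tolerance used by the BeNumerically matcher below).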
func ExpectMachineRequests(machine *v1alpha5.Machine, resources v1.ResourceList) {
for name, value := range resources {
v := machine.Spec.Resources.Requests[name]
ExpectWithOffset(1, v.AsApproximateFloat64()).To(BeNumerically("~", value.AsApproximateFloat64(), 10))
}
}
| 1,281 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
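// ExistingNode is the scheduler's representation of a node (or in-flight machine) that already exists in cluster
// state. It tracks the pods assigned to it during a scheduling pass along with the accumulated resource requests
// and scheduling requirements.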
type ExistingNode struct {
*state.StateNode
Pods []*v1.Pod
topology *Topology
requests v1.ResourceList
requirements scheduling.Requirements
}
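// NewExistingNode wraps a state node for a scheduling pass. Its initial requests are the daemonset resources that
// still need to schedule to the node, and its requirements are derived from the node's labels and hostname.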
func NewExistingNode(n *state.StateNode, topology *Topology, daemonResources v1.ResourceList) *ExistingNode {
// The state node passed in here must be a deep copy from cluster state, since we modify it.
// The remaining daemonResources to schedule are the total daemonResources minus what has already been scheduled.
remainingDaemonResources := resources.Subtract(daemonResources, n.DaemonSetRequests())
// If unexpected daemonset pods have scheduled to the node (e.g. because labels appeared on the node that allow a
// daemonset to schedule), we need to ensure that we don't let remainingDaemonResources go negative, as that would
// cause us to miscalculate the amount of remaining resources on the node
for k, v := range remainingDaemonResources {
if v.AsApproximateFloat64() < 0 {
v.Set(0)
remainingDaemonResources[k] = v
}
}
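// For illustration (hypothetical numbers): if daemonResources is {cpu: 2} but daemonset pods requesting {cpu: 3}
// have already scheduled to the node, naive subtraction would leave cpu: -1; the clamp above resets it to 0 so the
// requests tracked for this node don't under-count what has already been consumed.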
node := &ExistingNode{
StateNode: n,
topology: topology,
requests: remainingDaemonResources,
requirements: scheduling.NewLabelRequirements(n.Labels()),
}
node.requirements.Add(scheduling.NewRequirement(v1.LabelHostname, v1.NodeSelectorOpIn, n.HostName()))
topology.Register(v1.LabelHostname, n.HostName())
return node
}
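// Add attempts to add the pod to this existing node. It validates taints, host ports, volume limits, resource fit
// against the node's available capacity, and compatibility with the node's and topology's requirements; on success
// it updates the node's pods, requests, requirements, topology, host port usage, and volume usage.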
func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod) error {
// Check Taints
if err := scheduling.Taints(n.Taints()).Tolerates(pod); err != nil {
return err
}
if err := n.HostPortUsage().Validate(pod); err != nil {
return err
}
// determine the number of volumes that will be mounted if the pod schedules
mountedVolumeCount, err := n.VolumeUsage().Validate(ctx, kubeClient, pod)
if err != nil {
return err
}
if mountedVolumeCount.Exceeds(n.VolumeLimits()) {
return fmt.Errorf("would exceed node volume limits")
}
// check resource requests first since that's a pretty likely reason the pod won't schedule on an in-flight
// node, which at this point can't be increased in size
requests := resources.Merge(n.requests, resources.RequestsForPods(pod))
if !resources.Fits(requests, n.Available()) {
return fmt.Errorf("exceeds node resources")
}
nodeRequirements := scheduling.NewRequirements(n.requirements.Values()...)
podRequirements := scheduling.NewPodRequirements(pod)
// Check Machine Affinity Requirements
if err = nodeRequirements.Compatible(podRequirements); err != nil {
return err
}
nodeRequirements.Add(podRequirements.Values()...)
// Check Topology Requirements
topologyRequirements, err := n.topology.AddRequirements(podRequirements, nodeRequirements, pod)
if err != nil {
return err
}
if err = nodeRequirements.Compatible(topologyRequirements); err != nil {
return err
}
nodeRequirements.Add(topologyRequirements.Values()...)
// Update node
n.Pods = append(n.Pods, pod)
n.requests = requests
n.requirements = nodeRequirements
n.topology.Record(pod, nodeRequirements)
n.HostPortUsage().Add(ctx, pod)
n.VolumeUsage().Add(ctx, kubeClient, pod)
return nil
}
| 116 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling_test
import (
"fmt"
"math"
"math/rand"
"github.com/mitchellh/hashstructure/v2"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/aws/karpenter-core/pkg/test/expectations"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
var _ = Describe("Instance Type Selection", func() {
var minPrice float64
var instanceTypeMap map[string]*cloudprovider.InstanceType
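// nodePrice returns the price of the offering (capacity type and zone) that the given node was launched into.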
nodePrice := func(n *v1.Node) float64 {
of, _ := instanceTypeMap[n.Labels[v1.LabelInstanceTypeStable]].Offerings.Get(n.Labels[v1alpha5.LabelCapacityType], n.Labels[v1.LabelTopologyZone])
return of.Price
}
BeforeEach(func() {
// open up the provisioner to any instance types
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64, v1alpha5.ArchitectureAmd64},
},
}
cloudProvider.CreateCalls = nil
cloudProvider.InstanceTypes = fake.InstanceTypesAssorted()
instanceTypeMap = getInstanceTypeMap(cloudProvider.InstanceTypes)
minPrice = getMinPrice(cloudProvider.InstanceTypes)
// add some randomness to instance type ordering to ensure we sort everywhere we need to
rand.Shuffle(len(cloudProvider.InstanceTypes), func(i, j int) {
cloudProvider.InstanceTypes[i], cloudProvider.InstanceTypes[j] = cloudProvider.InstanceTypes[j], cloudProvider.InstanceTypes[i]
})
})
// This set of tests ensures that we schedule on the cheapest valid instance type while also ensuring that all of the
// instance types passed to the cloud provider are valid per the provisioner and node selector requirements. In some
// ways they repeat other tests, but the verification of every instance type passed to the cloud provider is unique
// to this set of tests.
It("should schedule on one of the cheapest instances", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
})
It("should schedule on one of the cheapest instances (pod arch = amd64)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
// ensure that the entire list of instance types matches the label
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureAmd64)
})
It("should schedule on one of the cheapest instances (pod arch = arm64)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureArm64)
})
It("should schedule on one of the cheapest instances (prov arch = amd64)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureAmd64)
})
It("should schedule on one of the cheapest instances (prov arch = arm64)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, v1alpha5.ArchitectureArm64)
})
It("should schedule on one of the cheapest instances (prov os = windows)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Windows)},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows))
})
It("should schedule on one of the cheapest instances (pod os = windows)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Windows)},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows))
})
It("should schedule on one of the cheapest instances (prov os = windows)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Windows)},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows))
})
It("should schedule on one of the cheapest instances (pod os = linux)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Linux)},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux))
})
It("should schedule on one of the cheapest instances (pod os = linux)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Linux)},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux))
})
It("should schedule on one of the cheapest instances (prov zone = test-zone-2)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelTopologyZone, "test-zone-2")
})
It("should schedule on one of the cheapest instances (pod zone = test-zone-2)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelTopologyZone, "test-zone-2")
})
It("should schedule on one of the cheapest instances (prov ct = spot)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.LabelCapacityType, v1alpha5.CapacityTypeSpot)
})
It("should schedule on one of the cheapest instances (pod ct = spot)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.LabelCapacityType, v1alpha5.CapacityTypeSpot)
})
It("should schedule on one of the cheapest instances (prov ct = ondemand, prov zone = test-zone-1)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeOnDemand},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithOffering(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.CapacityTypeOnDemand, "test-zone-1")
})
It("should schedule on one of the cheapest instances (pod ct = spot, pod zone = test-zone-1)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithOffering(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-1")
})
It("should schedule on one of the cheapest instances (prov ct = spot, pod zone = test-zone-2)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithOffering(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-2")
})
It("should schedule on one of the cheapest instances (prov ct = ondemand/test-zone-1/arm64/windows)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64},
},
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Windows)},
},
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeOnDemand},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithOffering(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.CapacityTypeOnDemand, "test-zone-1")
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Windows))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, "arm64")
})
It("should schedule on one of the cheapest instances (prov = spot/test-zone-2, pod = amd64/linux)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
},
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Linux)},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithOffering(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-2")
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, "amd64")
})
It("should schedule on one of the cheapest instances (pod ct = spot/test-zone-2/amd64/linux)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
},
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{string(v1.Linux)},
},
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(nodePrice(node)).To(Equal(minPrice))
ExpectInstancesWithOffering(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1alpha5.CapacityTypeSpot, "test-zone-2")
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelOSStable, string(v1.Linux))
ExpectInstancesWithLabel(supportedInstanceTypes(cloudProvider.CreateCalls[0]), v1.LabelArchStable, "amd64")
})
It("should not schedule if no instance type matches selector (pod arch = arm)", func() {
// remove all Arm instance types
cloudProvider.InstanceTypes = filterInstanceTypes(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType) bool {
return i.Requirements.Get(v1.LabelArchStable).Has(v1alpha5.ArchitectureAmd64)
})
Expect(len(cloudProvider.InstanceTypes)).To(BeNumerically(">", 0))
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(0))
})
It("should not schedule if no instance type matches selector (pod arch = arm zone=test-zone-2)", func() {
// remove all Arm instance types in zone-2
cloudProvider.InstanceTypes = filterInstanceTypes(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType) bool {
for _, off := range i.Offerings {
if off.Zone == "test-zone-2" {
return i.Requirements.Get(v1.LabelArchStable).Has(v1alpha5.ArchitectureAmd64)
}
}
return true
})
Expect(len(cloudProvider.InstanceTypes)).To(BeNumerically(">", 0))
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(0))
})
It("should not schedule if no instance type matches selector (prov arch = arm / pod zone=test-zone-2)", func() {
// remove all Arm instance types in zone-2
cloudProvider.InstanceTypes = filterInstanceTypes(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType) bool {
for _, off := range i.Offerings {
if off.Zone == "test-zone-2" {
return i.Requirements.Get(v1.LabelArchStable).Has(v1alpha5.ArchitectureAmd64)
}
}
return true
})
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64},
},
}
Expect(len(cloudProvider.InstanceTypes)).To(BeNumerically(">", 0))
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
Expect(cloudProvider.CreateCalls).To(HaveLen(0))
})
It("should schedule on an instance with enough resources", func() {
// this is a pretty thorough exercise of scheduling, so we also check an invariant that scheduling doesn't
// modify the instance type's Overhead() or Resources() maps so they can return the same map every time instead
		// of re-allocating a new one per call
resourceHashes := map[string]uint64{}
overheadHashes := map[string]uint64{}
for _, it := range cloudProvider.InstanceTypes {
var err error
resourceHashes[it.Name], err = hashstructure.Hash(it.Capacity, hashstructure.FormatV2, nil)
Expect(err).To(BeNil())
overheadHashes[it.Name], err = hashstructure.Hash(it.Overhead.Total(), hashstructure.FormatV2, nil)
Expect(err).To(BeNil())
}
ExpectApplied(ctx, env.Client, provisioner)
// these values are constructed so that three of these pods can always fit on at least one of our instance types
for _, cpu := range []float64{0.1, 1.0, 2, 2.5, 4, 8, 16} {
for _, mem := range []float64{0.1, 1.0, 2, 4, 8, 16, 32} {
cluster.Reset()
cloudProvider.CreateCalls = nil
opts := test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%0.1f", cpu)),
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%0.1fGi", mem)),
}}}
pods := []*v1.Pod{
test.UnschedulablePod(opts), test.UnschedulablePod(opts), test.UnschedulablePod(opts),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := sets.NewString()
for _, p := range pods {
node := ExpectScheduled(ctx, env.Client, p)
nodeNames.Insert(node.Name)
}
// should fit on one node
Expect(nodeNames).To(HaveLen(1))
totalPodResources := resources.RequestsForPods(pods...)
for _, it := range supportedInstanceTypes(cloudProvider.CreateCalls[0]) {
totalReserved := resources.Merge(totalPodResources, it.Overhead.Total())
					// the total pod resources in CPU and memory plus the instance overhead should always be less than the
					// resources available on every viable instance type
Expect(totalReserved.Cpu().Cmp(it.Capacity[v1.ResourceCPU])).To(Equal(-1))
Expect(totalReserved.Memory().Cmp(it.Capacity[v1.ResourceMemory])).To(Equal(-1))
}
}
}
for _, it := range cloudProvider.InstanceTypes {
resourceHash, err := hashstructure.Hash(it.Capacity, hashstructure.FormatV2, nil)
Expect(err).To(BeNil())
overheadHash, err := hashstructure.Hash(it.Overhead.Total(), hashstructure.FormatV2, nil)
Expect(err).To(BeNil())
Expect(resourceHash).To(Equal(resourceHashes[it.Name]), fmt.Sprintf("expected %s Resources() to not be modified by scheduling", it.Name))
Expect(overheadHash).To(Equal(overheadHashes[it.Name]), fmt.Sprintf("expected %s Overhead() to not be modified by scheduling", it.Name))
}
})
It("should schedule on cheaper on-demand instance even when spot price ordering would place other instance types first", func() {
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "test-instance1",
Architecture: "amd64",
OperatingSystems: sets.NewString(string(v1.Linux)),
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
Offerings: []cloudprovider.Offering{
{CapacityType: v1alpha5.CapacityTypeOnDemand, Zone: "test-zone-1a", Price: 1.0, Available: true},
{CapacityType: v1alpha5.CapacityTypeSpot, Zone: "test-zone-1a", Price: 0.2, Available: true},
},
}),
fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "test-instance2",
Architecture: "amd64",
OperatingSystems: sets.NewString(string(v1.Linux)),
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
Offerings: []cloudprovider.Offering{
{CapacityType: v1alpha5.CapacityTypeOnDemand, Zone: "test-zone-1a", Price: 1.3, Available: true},
{CapacityType: v1alpha5.CapacityTypeSpot, Zone: "test-zone-1a", Price: 0.1, Available: true},
},
}),
}
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{"on-demand"},
},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("test-instance1"))
})
})
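// supportedInstanceTypes returns the fake cloud provider's instance types that satisfy the instance-type
// requirement recorded on the given Machine (as captured from a CloudProvider Create call).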
func supportedInstanceTypes(machine *v1alpha5.Machine) (res []*cloudprovider.InstanceType) {
reqs := scheduling.NewNodeSelectorRequirements(machine.Spec.Requirements...)
return lo.Filter(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType, _ int) bool {
return reqs.Get(v1.LabelInstanceTypeStable).Has(i.Name)
})
}
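// getInstanceTypeMap indexes the given instance types by name for convenient lookup in tests.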
func getInstanceTypeMap(its []*cloudprovider.InstanceType) map[string]*cloudprovider.InstanceType {
return lo.SliceToMap(its, func(it *cloudprovider.InstanceType) (string, *cloudprovider.InstanceType) {
return it.Name, it
})
}
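// getMinPrice returns the lowest offering price found across all offerings of the given instance types.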
func getMinPrice(its []*cloudprovider.InstanceType) float64 {
minPrice := math.MaxFloat64
for _, it := range its {
for _, of := range it.Offerings {
minPrice = math.Min(minPrice, of.Price)
}
}
return minPrice
}
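// filterInstanceTypes returns only the instance types for which the predicate returns true.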
func filterInstanceTypes(types []*cloudprovider.InstanceType, pred func(i *cloudprovider.InstanceType) bool) []*cloudprovider.InstanceType {
var ret []*cloudprovider.InstanceType
for _, it := range types {
if pred(it) {
ret = append(ret, it)
}
}
return ret
}
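// ExpectInstancesWithOffering asserts that every given instance type has at least one offering with the provided
// capacity type and zone.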
func ExpectInstancesWithOffering(instanceTypes []*cloudprovider.InstanceType, capacityType string, zone string) {
for _, it := range instanceTypes {
matched := false
for _, offering := range it.Offerings {
if offering.CapacityType == capacityType && offering.Zone == zone {
matched = true
}
}
Expect(matched).To(BeTrue(), fmt.Sprintf("expected to find zone %s / capacity type %s in an offering", zone, capacityType))
}
}
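// ExpectInstancesWithLabel asserts that every given instance type supports the provided label value, either via its
// requirements (arch/OS) or via one of its offerings (zone/capacity type).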
func ExpectInstancesWithLabel(instanceTypes []*cloudprovider.InstanceType, label string, value string) {
for _, it := range instanceTypes {
switch label {
case v1.LabelArchStable:
Expect(it.Requirements.Get(v1.LabelArchStable).Has(value)).To(BeTrue(), fmt.Sprintf("expected to find an arch of %s", value))
case v1.LabelOSStable:
Expect(it.Requirements.Get(v1.LabelOSStable).Has(value)).To(BeTrue(), fmt.Sprintf("expected to find an OS of %s", value))
case v1.LabelTopologyZone:
{
matched := false
for _, offering := range it.Offerings {
if offering.Zone == value {
matched = true
break
}
}
Expect(matched).To(BeTrue(), fmt.Sprintf("expected to find zone %s in an offering", value))
}
case v1alpha5.LabelCapacityType:
{
matched := false
for _, offering := range it.Offerings {
if offering.CapacityType == value {
matched = true
break
}
}
Expect(matched).To(BeTrue(), fmt.Sprintf("expected to find caapacity type %s in an offering", value))
}
default:
Fail(fmt.Sprintf("unsupported label %s in test", label))
}
}
}
| 677 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
"strings"
"sync/atomic"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
// Machine is a set of constraints, compatible pods, and possible instance types that could fulfill these constraints. This
// will be turned into one or more actual node instances within the cluster after bin packing.
type Machine struct {
MachineTemplate
Pods []*v1.Pod
topology *Topology
hostPortUsage *scheduling.HostPortUsage
daemonResources v1.ResourceList
}
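// nodeID is an atomically incremented counter used to generate unique placeholder hostnames for in-memory machines.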
var nodeID int64
func NewMachine(machineTemplate *MachineTemplate, topology *Topology, daemonResources v1.ResourceList, instanceTypes []*cloudprovider.InstanceType) *Machine {
// Copy the template, and add hostname
hostname := fmt.Sprintf("hostname-placeholder-%04d", atomic.AddInt64(&nodeID, 1))
topology.Register(v1.LabelHostname, hostname)
template := *machineTemplate
template.Requirements = scheduling.NewRequirements()
template.Requirements.Add(machineTemplate.Requirements.Values()...)
template.Requirements.Add(scheduling.NewRequirement(v1.LabelHostname, v1.NodeSelectorOpIn, hostname))
template.InstanceTypeOptions = instanceTypes
template.Requests = daemonResources
return &Machine{
MachineTemplate: template,
hostPortUsage: scheduling.NewHostPortUsage(),
topology: topology,
daemonResources: daemonResources,
}
}
func (m *Machine) Add(ctx context.Context, pod *v1.Pod) error {
// Check Taints
if err := m.Taints.Tolerates(pod); err != nil {
return err
}
	// Check that the pod's requested host ports don't conflict with ports already exposed on the node
if err := m.hostPortUsage.Validate(pod); err != nil {
return err
}
machineRequirements := scheduling.NewRequirements(m.Requirements.Values()...)
podRequirements := scheduling.NewPodRequirements(pod)
// Check Machine Affinity Requirements
if err := machineRequirements.Compatible(podRequirements); err != nil {
return fmt.Errorf("incompatible requirements, %w", err)
}
machineRequirements.Add(podRequirements.Values()...)
// Check Topology Requirements
topologyRequirements, err := m.topology.AddRequirements(podRequirements, machineRequirements, pod)
if err != nil {
return err
}
if err = machineRequirements.Compatible(topologyRequirements); err != nil {
return err
}
machineRequirements.Add(topologyRequirements.Values()...)
// Check instance type combinations
requests := resources.Merge(m.Requests, resources.RequestsForPods(pod))
filtered := filterInstanceTypesByRequirements(m.InstanceTypeOptions, machineRequirements, requests)
if len(filtered.remaining) == 0 {
// log the total resources being requested (daemonset + the pod)
cumulativeResources := resources.Merge(m.daemonResources, resources.RequestsForPods(pod))
return fmt.Errorf("no instance type satisfied resources %s and requirements %s (%s)", resources.String(cumulativeResources), machineRequirements, filtered.FailureReason())
}
// Update node
m.Pods = append(m.Pods, pod)
m.InstanceTypeOptions = filtered.remaining
m.Requests = requests
m.Requirements = machineRequirements
m.topology.Record(pod, machineRequirements)
m.hostPortUsage.Add(ctx, pod)
return nil
}
// FinalizeScheduling is called once all scheduling has completed and allows the node to perform any cleanup
// necessary before its requirements are used for instance launching
func (m *Machine) FinalizeScheduling() {
// We need nodes to have hostnames for topology purposes, but we don't want to pass that node name on to consumers
// of the node as it will be displayed in error messages
delete(m.Requirements, v1.LabelHostname)
}
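// InstanceTypeList returns a human-readable, truncated list of instance type names for use in log and error messages.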
func InstanceTypeList(instanceTypeOptions []*cloudprovider.InstanceType) string {
var itSb strings.Builder
for i, it := range instanceTypeOptions {
// print the first 5 instance types only (indices 0-4)
if i > 4 {
fmt.Fprintf(&itSb, " and %d other(s)", len(instanceTypeOptions)-i)
break
} else if i > 0 {
fmt.Fprint(&itSb, ", ")
}
fmt.Fprint(&itSb, it.Name)
}
return itSb.String()
}
type filterResults struct {
remaining []*cloudprovider.InstanceType
// Each of these three flags indicates if that particular criteria was met by at least one instance type
requirementsMet bool
fits bool
hasOffering bool
// requirementsAndFits indicates if a single instance type met the scheduling requirements and had enough resources
requirementsAndFits bool
// requirementsAndOffering indicates if a single instance type met the scheduling requirements and was a required offering
requirementsAndOffering bool
// fitsAndOffering indicates if a single instance type had enough resources and was a required offering
fitsAndOffering bool
requests v1.ResourceList
}
// FailureReason returns a presentable string explaining why all instance types were filtered out
//
//nolint:gocyclo
func (r filterResults) FailureReason() string {
if len(r.remaining) > 0 {
return ""
}
	// no instance type met any of the three criteria, meaning each criterion alone was enough to completely prevent
	// this pod from scheduling
if !r.requirementsMet && !r.fits && !r.hasOffering {
return "no instance type met the scheduling requirements or had enough resources or had a required offering"
}
// check the other pairwise criteria
if !r.requirementsMet && !r.fits {
return "no instance type met the scheduling requirements or had enough resources"
}
if !r.requirementsMet && !r.hasOffering {
return "no instance type met the scheduling requirements or had a required offering"
}
if !r.fits && !r.hasOffering {
return "no instance type had enough resources or had a required offering"
}
// and then each individual criteria. These are sort of the same as above in that each one indicates that no
// instance type matched that criteria at all, so it was enough to exclude all instance types. I think it's
// helpful to have these separate, since we can report the multiple excluding criteria above.
if !r.requirementsMet {
return "no instance type met all requirements"
}
if !r.fits {
msg := "no instance type has enough resources"
// special case for a user typo I saw reported once
if r.requests.Cpu().Cmp(resource.MustParse("1M")) >= 0 {
msg += " (CPU request >= 1 Million, m vs M typo?)"
}
return msg
}
if !r.hasOffering {
return "no instance type has the required offering"
}
// see if any pair of criteria was enough to exclude all instances
if r.requirementsAndFits {
return "no instance type which met the scheduling requirements and had enough resources, had a required offering"
}
if r.fitsAndOffering {
return "no instance type which had enough resources and the required offering met the scheduling requirements"
}
if r.requirementsAndOffering {
return "no instance type which met the scheduling requirements and the required offering had the required resources"
}
// finally all instances were filtered out, but we had at least one instance that met each criteria, and met each
// pairwise set of criteria, so the only thing that remains is no instance which met all three criteria simultaneously
return "no instance type met the requirements/resources/offering tuple"
}
//nolint:gocyclo
func filterInstanceTypesByRequirements(instanceTypes []*cloudprovider.InstanceType, requirements scheduling.Requirements, requests v1.ResourceList) filterResults {
results := filterResults{
requests: requests,
requirementsMet: false,
fits: false,
hasOffering: false,
requirementsAndFits: false,
requirementsAndOffering: false,
fitsAndOffering: false,
}
for _, it := range instanceTypes {
		// the tradeoff of not short-circuiting the filtering is that we can report much better error messages
		// about why scheduling failed
itCompat := compatible(it, requirements)
itFits := fits(it, requests)
itHasOffering := hasOffering(it, requirements)
// track if any single instance type met a single criteria
results.requirementsMet = results.requirementsMet || itCompat
results.fits = results.fits || itFits
results.hasOffering = results.hasOffering || itHasOffering
// track if any single instance type met the three pairs of criteria
results.requirementsAndFits = results.requirementsAndFits || (itCompat && itFits && !itHasOffering)
results.requirementsAndOffering = results.requirementsAndOffering || (itCompat && itHasOffering && !itFits)
results.fitsAndOffering = results.fitsAndOffering || (itFits && itHasOffering && !itCompat)
// and if it met all criteria, we keep the instance type and continue filtering. We now won't be reporting
// any errors.
if itCompat && itFits && itHasOffering {
results.remaining = append(results.remaining, it)
}
}
return results
}
func compatible(instanceType *cloudprovider.InstanceType, requirements scheduling.Requirements) bool {
return instanceType.Requirements.Intersects(requirements) == nil
}
func fits(instanceType *cloudprovider.InstanceType, requests v1.ResourceList) bool {
return resources.Fits(requests, instanceType.Allocatable())
}
func hasOffering(instanceType *cloudprovider.InstanceType, requirements scheduling.Requirements) bool {
for _, offering := range instanceType.Offerings.Available() {
if (!requirements.Has(v1.LabelTopologyZone) || requirements.Get(v1.LabelTopologyZone).Has(offering.Zone)) &&
(!requirements.Has(v1alpha5.LabelCapacityType) || requirements.Get(v1alpha5.LabelCapacityType).Has(offering.CapacityType)) {
return true
}
}
return false
}
| 271 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/ptr"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/scheduling"
)
// MachineTemplate encapsulates the fields required to create a node and mirrors
// the fields in Provisioner. These structs are maintained separately so that
// fields like Requirements can be stored more efficiently.
type MachineTemplate struct {
ProvisionerName string
InstanceTypeOptions cloudprovider.InstanceTypes
Provider *v1alpha5.Provider
ProviderRef *v1alpha5.MachineTemplateRef
Annotations map[string]string
Labels map[string]string
Taints scheduling.Taints
StartupTaints scheduling.Taints
Requirements scheduling.Requirements
Requests v1.ResourceList
Kubelet *v1alpha5.KubeletConfiguration
}
func NewMachineTemplate(provisioner *v1alpha5.Provisioner) *MachineTemplate {
labels := lo.Assign(provisioner.Spec.Labels, map[string]string{v1alpha5.ProvisionerNameLabelKey: provisioner.Name})
requirements := scheduling.NewRequirements()
requirements.Add(scheduling.NewNodeSelectorRequirements(provisioner.Spec.Requirements...).Values()...)
requirements.Add(scheduling.NewLabelRequirements(labels).Values()...)
return &MachineTemplate{
ProvisionerName: provisioner.Name,
Provider: provisioner.Spec.Provider,
ProviderRef: provisioner.Spec.ProviderRef,
Kubelet: provisioner.Spec.KubeletConfiguration,
Annotations: provisioner.Spec.Annotations,
Labels: labels,
Taints: provisioner.Spec.Taints,
StartupTaints: provisioner.Spec.StartupTaints,
Requirements: requirements,
}
}
func (i *MachineTemplate) ToNode() *v1.Node {
return &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: lo.Assign(i.Labels, i.Requirements.Labels()),
Annotations: i.Annotations,
Finalizers: []string{v1alpha5.TerminationFinalizer},
},
Spec: v1.NodeSpec{
Taints: append(i.Taints, i.StartupTaints...),
},
}
}
func (i *MachineTemplate) ToMachine(owner *v1alpha5.Provisioner) *v1alpha5.Machine {
	// Order the instance types by price and only take the first 100 of them to limit the size of the instance type requirement
instanceTypes := lo.Slice(i.InstanceTypeOptions.OrderByPrice(i.Requirements), 0, 100)
i.Requirements.Add(scheduling.NewRequirement(v1.LabelInstanceTypeStable, v1.NodeSelectorOpIn, lo.Map(instanceTypes, func(i *cloudprovider.InstanceType, _ int) string {
return i.Name
})...))
m := &v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("%s-", i.ProvisionerName),
Annotations: lo.Assign(i.Annotations, v1alpha5.ProviderAnnotation(i.Provider)),
Labels: i.Labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: v1alpha5.SchemeGroupVersion.String(),
Kind: "Provisioner",
Name: owner.Name,
UID: owner.UID,
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
Spec: v1alpha5.MachineSpec{
Taints: i.Taints,
StartupTaints: i.StartupTaints,
Requirements: i.Requirements.NodeSelectorRequirements(),
Kubelet: i.Kubelet,
Resources: v1alpha5.ResourceRequirements{
Requests: i.Requests,
},
MachineTemplateRef: i.ProviderRef,
},
}
return m
}
| 112 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
"sort"
v1 "k8s.io/api/core/v1"
"knative.dev/pkg/logging"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/utils/pretty"
)
type Preferences struct {
// ToleratePreferNoSchedule controls if preference relaxation adds a toleration for PreferNoSchedule taints. This only
// helps if there is a corresponding taint, so we don't always add it.
ToleratePreferNoSchedule bool
}
func (p *Preferences) Relax(ctx context.Context, pod *v1.Pod) bool {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(pod)))
relaxations := []func(*v1.Pod) *string{
p.removeRequiredNodeAffinityTerm,
p.removePreferredPodAffinityTerm,
p.removePreferredPodAntiAffinityTerm,
p.removePreferredNodeAffinityTerm,
p.removeTopologySpreadScheduleAnyway}
if p.ToleratePreferNoSchedule {
relaxations = append(relaxations, p.toleratePreferNoScheduleTaints)
}
for _, relaxFunc := range relaxations {
if reason := relaxFunc(pod); reason != nil {
logging.FromContext(ctx).Debugf("relaxing soft constraints for pod since it previously failed to schedule, %s", ptr.StringValue(reason))
return true
}
}
return false
}
func (p *Preferences) removePreferredNodeAffinityTerm(pod *v1.Pod) *string {
if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil || len(pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 {
return nil
}
terms := pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	// Remove the heaviest-weighted term if any remain (terms are an OR semantic)
if len(terms) > 0 {
// Sort descending by weight to remove heaviest preferences to try lighter ones
sort.SliceStable(terms, func(i, j int) bool { return terms[i].Weight > terms[j].Weight })
pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = terms[1:]
return ptr.String(fmt.Sprintf("removing: spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution[0]=%s", pretty.Concise(terms[0])))
}
return nil
}
func (p *Preferences) removeRequiredNodeAffinityTerm(pod *v1.Pod) *string {
if pod.Spec.Affinity == nil ||
pod.Spec.Affinity.NodeAffinity == nil ||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil ||
len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 {
return nil
}
terms := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
	// Remove the first term if there's more than one (terms are an OR semantic). Unlike preferred affinity, we cannot remove all terms
if len(terms) > 1 {
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = terms[1:]
return ptr.String(fmt.Sprintf("removing: spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution[0]=%s", pretty.Concise(terms[0])))
}
return nil
}
func (p *Preferences) removeTopologySpreadScheduleAnyway(pod *v1.Pod) *string {
for i, tsc := range pod.Spec.TopologySpreadConstraints {
if tsc.WhenUnsatisfiable == v1.ScheduleAnyway {
msg := fmt.Sprintf("removing: spec.topologySpreadConstraints = %s", pretty.Concise(tsc))
pod.Spec.TopologySpreadConstraints[i] = pod.Spec.TopologySpreadConstraints[len(pod.Spec.TopologySpreadConstraints)-1]
pod.Spec.TopologySpreadConstraints = pod.Spec.TopologySpreadConstraints[:len(pod.Spec.TopologySpreadConstraints)-1]
return ptr.String(msg)
}
}
return nil
}
func (p *Preferences) removePreferredPodAffinityTerm(pod *v1.Pod) *string {
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAffinity == nil || len(pod.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 {
return nil
}
terms := pod.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	// Remove the heaviest-weighted term if any remain
if len(terms) > 0 {
// Sort descending by weight to remove heaviest preferences to try lighter ones
sort.SliceStable(terms, func(i, j int) bool { return terms[i].Weight > terms[j].Weight })
pod.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = terms[1:]
return ptr.String(fmt.Sprintf("removing: spec.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution[0]=%s", pretty.Concise(terms[0])))
}
return nil
}
func (p *Preferences) removePreferredPodAntiAffinityTerm(pod *v1.Pod) *string {
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 {
return nil
}
terms := pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
	// Remove the heaviest-weighted term if any remain
if len(terms) > 0 {
// Sort descending by weight to remove heaviest preferences to try lighter ones
sort.SliceStable(terms, func(i, j int) bool { return terms[i].Weight > terms[j].Weight })
pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = terms[1:]
return ptr.String(fmt.Sprintf("removing: spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0]=%s", pretty.Concise(terms[0])))
}
return nil
}
func (p *Preferences) toleratePreferNoScheduleTaints(pod *v1.Pod) *string {
// Tolerate all Taints with PreferNoSchedule effect
toleration := v1.Toleration{
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectPreferNoSchedule,
}
for _, t := range pod.Spec.Tolerations {
if t.MatchToleration(&toleration) {
return nil
}
}
tolerations := append(pod.Spec.Tolerations, toleration)
pod.Spec.Tolerations = tolerations
return ptr.String("adding: toleration for PreferNoSchedule taints")
}
| 146 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"sort"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
// Queue is a queue of pods awaiting scheduling. It's used to keep attempting to schedule pods as long as we are
// making progress. This is sometimes required to maintain zonal topology spreads with constrained pods, and it can
// satisfy pod affinities that occur within a batch of pods if enough constraints are provided.
type Queue struct {
pods []*v1.Pod
lastLen map[types.UID]int
}
// NewQueue constructs a new queue given the input pods, sorting them to optimize for bin-packing into nodes.
func NewQueue(pods ...*v1.Pod) *Queue {
sort.Slice(pods, byCPUAndMemoryDescending(pods))
return &Queue{
pods: pods,
lastLen: map[types.UID]int{},
}
}
// Pop returns the next pod or false if no longer making progress
func (q *Queue) Pop() (*v1.Pod, bool) {
if len(q.pods) == 0 {
return nil, false
}
p := q.pods[0]
// If we are about to pop a pod when it was last pushed with the same number of pods in the queue, then
// we've cycled through all pods in the queue without making progress and can stop
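	// For example (illustrative): with pods [A, B, C], if each is popped, fails to schedule, and is re-pushed
	// un-relaxed, then by the time A is popped again the recorded length for A equals the current queue length,
	// so we stop instead of cycling forever.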
if q.lastLen[p.UID] == len(q.pods) {
return nil, false
}
q.pods = q.pods[1:]
return p, true
}
// Push adds a pod back onto the queue, recording the queue length at push time. If the pod was relaxed, the records
// are cleared since relaxation may allow further progress. Pop uses these records to detect staleness.
func (q *Queue) Push(pod *v1.Pod, relaxed bool) {
q.pods = append(q.pods, pod)
if relaxed {
q.lastLen = map[types.UID]int{}
} else {
q.lastLen[pod.UID] = len(q.pods)
}
}
func (q *Queue) List() []*v1.Pod {
return q.pods
}
func byCPUAndMemoryDescending(pods []*v1.Pod) func(i int, j int) bool {
return func(i, j int) bool {
lhsPod := pods[i]
rhsPod := pods[j]
lhs := resources.RequestsForPods(lhsPod)
rhs := resources.RequestsForPods(rhsPod)
cpuCmp := resources.Cmp(lhs[v1.ResourceCPU], rhs[v1.ResourceCPU])
if cpuCmp < 0 {
// LHS has less CPU, so it should be sorted after
return false
} else if cpuCmp > 0 {
return true
}
memCmp := resources.Cmp(lhs[v1.ResourceMemory], rhs[v1.ResourceMemory])
if memCmp < 0 {
return false
} else if memCmp > 0 {
return true
}
// If all else is equal, give a consistent ordering. This reduces the number of NominatePod events as we
// de-duplicate those based on identical content.
// unfortunately creation timestamp only has a 1-second resolution, so we would still re-order pods created
// during a deployment scale-up if we only looked at creation time
if lhsPod.CreationTimestamp != rhsPod.CreationTimestamp {
return lhsPod.CreationTimestamp.Before(&rhsPod.CreationTimestamp)
}
// pod UIDs aren't in any order, but since we first sort by creation time this only serves to consistently order
// pods created within the same second
return lhsPod.UID < rhsPod.UID
}
}
| 111 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"bytes"
"context"
"fmt"
"sort"
"github.com/samber/lo"
"go.uber.org/multierr"
v1 "k8s.io/api/core/v1"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
schedulingevents "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling/events"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
// SchedulerOptions can be used to control the scheduling; these options are currently only used during consolidation.
type SchedulerOptions struct {
// SimulationMode if true will prevent recording of the pod nomination decisions as events
SimulationMode bool
}
func NewScheduler(ctx context.Context, kubeClient client.Client, machines []*MachineTemplate,
provisioners []v1alpha5.Provisioner, cluster *state.Cluster, stateNodes []*state.StateNode, topology *Topology,
instanceTypes map[string][]*cloudprovider.InstanceType, daemonSetPods []*v1.Pod,
recorder events.Recorder, opts SchedulerOptions) *Scheduler {
// if any of the provisioners add a taint with a prefer no schedule effect, we add a toleration for the taint
// during preference relaxation
toleratePreferNoSchedule := false
for _, prov := range provisioners {
for _, taint := range prov.Spec.Taints {
if taint.Effect == v1.TaintEffectPreferNoSchedule {
toleratePreferNoSchedule = true
}
}
}
s := &Scheduler{
ctx: ctx,
kubeClient: kubeClient,
machineTemplates: machines,
topology: topology,
cluster: cluster,
instanceTypes: instanceTypes,
daemonOverhead: getDaemonOverhead(machines, daemonSetPods),
recorder: recorder,
opts: opts,
preferences: &Preferences{ToleratePreferNoSchedule: toleratePreferNoSchedule},
remainingResources: map[string]v1.ResourceList{},
}
for _, provisioner := range provisioners {
if provisioner.Spec.Limits != nil {
s.remainingResources[provisioner.Name] = provisioner.Spec.Limits.Resources
}
}
s.calculateExistingMachines(stateNodes, daemonSetPods)
return s
}
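// Scheduler packs a batch of pods onto existing in-flight nodes and, where necessary, onto new machines built from
// the configured machine templates, while respecting topology constraints and provisioner resource limits.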
type Scheduler struct {
ctx context.Context
newMachines []*Machine
existingNodes []*ExistingNode
machineTemplates []*MachineTemplate
remainingResources map[string]v1.ResourceList // provisioner name -> remaining resources for that provisioner
instanceTypes map[string][]*cloudprovider.InstanceType
daemonOverhead map[*MachineTemplate]v1.ResourceList
preferences *Preferences
topology *Topology
cluster *state.Cluster
recorder events.Recorder
opts SchedulerOptions
kubeClient client.Client
}
// Results contains the results of the scheduling operation
type Results struct {
NewMachines []*Machine
ExistingNodes []*ExistingNode
PodErrors map[*v1.Pod]error
}
func (r Results) AllPodsScheduled() bool {
return len(r.PodErrors) == 0
}
// PodSchedulingErrors creates a presentable string that describes why pods wouldn't schedule
func (r Results) PodSchedulingErrors() string {
if len(r.PodErrors) == 0 {
return "No Pod Scheduling Errors"
}
var msg bytes.Buffer
fmt.Fprintf(&msg, "not all pods would schedule, ")
const MaxErrors = 5
numErrors := 0
for k, err := range r.PodErrors {
fmt.Fprintf(&msg, "%s/%s => %s ", k.Namespace, k.Name, err)
numErrors++
if numErrors >= MaxErrors {
fmt.Fprintf(&msg, " and %d other(s)", len(r.PodErrors)-MaxErrors)
break
}
}
return msg.String()
}
func (s *Scheduler) Solve(ctx context.Context, pods []*v1.Pod) (*Results, error) {
// We loop trying to schedule unschedulable pods as long as we are making progress. This solves a few
// issues including pods with affinity to another pod in the batch. We could topo-sort to solve this, but it wouldn't
// solve the problem of scheduling pods where a particular order is needed to prevent a max-skew violation. E.g. if we
// had 5xA pods and 5xB pods where they have a zonal topology spread, but A can only go in one zone and B in another.
// We need to schedule them alternating, A, B, A, B, .... and this solution handles that as well.
errors := map[*v1.Pod]error{}
q := NewQueue(pods...)
for {
// Try the next pod
pod, ok := q.Pop()
if !ok {
break
}
// Schedule to existing nodes or create a new node
if errors[pod] = s.add(ctx, pod); errors[pod] == nil {
continue
}
// If unsuccessful, relax the pod and recompute topology
relaxed := s.preferences.Relax(ctx, pod)
q.Push(pod, relaxed)
if relaxed {
if err := s.topology.Update(ctx, pod); err != nil {
logging.FromContext(ctx).Errorf("updating topology, %s", err)
}
}
}
for _, m := range s.newMachines {
m.FinalizeScheduling()
}
if !s.opts.SimulationMode {
s.recordSchedulingResults(ctx, pods, q.List(), errors)
}
// clear any nil errors so we can know that len(PodErrors) == 0 => all pods scheduled
for k, v := range errors {
if v == nil {
delete(errors, k)
}
}
return &Results{
NewMachines: s.newMachines,
ExistingNodes: s.existingNodes,
PodErrors: errors,
}, nil
}
func (s *Scheduler) recordSchedulingResults(ctx context.Context, pods []*v1.Pod, failedToSchedule []*v1.Pod, errors map[*v1.Pod]error) {
// Report failures and nominations
for _, pod := range failedToSchedule {
logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(pod)).Errorf("Could not schedule pod, %s", errors[pod])
s.recorder.Publish(schedulingevents.PodFailedToSchedule(pod, errors[pod]))
}
for _, existing := range s.existingNodes {
if len(existing.Pods) > 0 {
s.cluster.NominateNodeForPod(ctx, existing.Name())
}
for _, pod := range existing.Pods {
s.recorder.Publish(schedulingevents.NominatePod(pod, existing.Node, existing.Machine))
}
}
// Report new nodes, or exit to avoid log spam
newCount := 0
for _, machine := range s.newMachines {
newCount += len(machine.Pods)
}
if newCount == 0 {
return
}
logging.FromContext(ctx).With("pods", len(pods)).Infof("found provisionable pod(s)")
logging.FromContext(ctx).With("machines", len(s.newMachines), "pods", newCount).Infof("computed new machine(s) to fit pod(s)")
	// Report in-flight nodes, or exit to avoid log spam
inflightCount := 0
existingCount := 0
for _, node := range lo.Filter(s.existingNodes, func(node *ExistingNode, _ int) bool { return len(node.Pods) > 0 }) {
inflightCount++
existingCount += len(node.Pods)
}
if existingCount == 0 {
return
}
logging.FromContext(ctx).Infof("computed %d unready node(s) will fit %d pod(s)", inflightCount, existingCount)
}
func (s *Scheduler) add(ctx context.Context, pod *v1.Pod) error {
// first try to schedule against an in-flight real node
for _, node := range s.existingNodes {
if err := node.Add(ctx, s.kubeClient, pod); err == nil {
return nil
}
}
// Consider using https://pkg.go.dev/container/heap
sort.Slice(s.newMachines, func(a, b int) bool { return len(s.newMachines[a].Pods) < len(s.newMachines[b].Pods) })
	// Next, try the machines that we are already planning to create, preferring those with the fewest pods
for _, machine := range s.newMachines {
if err := machine.Add(ctx, pod); err == nil {
return nil
}
}
// Create new node
var errs error
for _, machineTemplate := range s.machineTemplates {
instanceTypes := s.instanceTypes[machineTemplate.ProvisionerName]
// if limits have been applied to the provisioner, ensure we filter instance types to avoid violating those limits
if remaining, ok := s.remainingResources[machineTemplate.ProvisionerName]; ok {
instanceTypes = filterByRemainingResources(s.instanceTypes[machineTemplate.ProvisionerName], remaining)
if len(instanceTypes) == 0 {
errs = multierr.Append(errs, fmt.Errorf("all available instance types exceed limits for provisioner: %q", machineTemplate.ProvisionerName))
continue
} else if len(s.instanceTypes[machineTemplate.ProvisionerName]) != len(instanceTypes) && !s.opts.SimulationMode {
logging.FromContext(ctx).With("provisioner", machineTemplate.ProvisionerName).Debugf("%d out of %d instance types were excluded because they would breach provisioner limits",
len(s.instanceTypes[machineTemplate.ProvisionerName])-len(instanceTypes), len(s.instanceTypes[machineTemplate.ProvisionerName]))
}
}
machine := NewMachine(machineTemplate, s.topology, s.daemonOverhead[machineTemplate], instanceTypes)
if err := machine.Add(ctx, pod); err != nil {
errs = multierr.Append(errs, fmt.Errorf("incompatible with provisioner %q, daemonset overhead=%s, %w",
machineTemplate.ProvisionerName,
resources.String(s.daemonOverhead[machineTemplate]),
err))
continue
}
// we will launch this machine and need to track its maximum possible resource usage against our remaining resources
s.newMachines = append(s.newMachines, machine)
s.remainingResources[machineTemplate.ProvisionerName] = subtractMax(s.remainingResources[machineTemplate.ProvisionerName], machine.InstanceTypeOptions)
return nil
}
return errs
}
func (s *Scheduler) calculateExistingMachines(stateNodes []*state.StateNode, daemonSetPods []*v1.Pod) {
// create our existing nodes
for _, node := range stateNodes {
if !node.Owned() {
// ignoring this node as it wasn't launched by us
continue
}
// Calculate any daemonsets that should schedule to the inflight node
var daemons []*v1.Pod
for _, p := range daemonSetPods {
if err := scheduling.Taints(node.Taints()).Tolerates(p); err != nil {
continue
}
if err := scheduling.NewLabelRequirements(node.Labels()).Compatible(scheduling.NewPodRequirements(p)); err != nil {
continue
}
daemons = append(daemons, p)
}
s.existingNodes = append(s.existingNodes, NewExistingNode(node, s.topology, resources.RequestsForPods(daemons...)))
// We don't use the status field and instead recompute the remaining resources to ensure we have a consistent view
// of the cluster during scheduling. Depending on how node creation falls out, this will also work for cases where
// we don't create Machine resources.
if _, ok := s.remainingResources[node.Labels()[v1alpha5.ProvisionerNameLabelKey]]; ok {
s.remainingResources[node.Labels()[v1alpha5.ProvisionerNameLabelKey]] = resources.Subtract(s.remainingResources[node.Labels()[v1alpha5.ProvisionerNameLabelKey]], node.Capacity())
}
}
// Order the existing nodes for scheduling with initialized nodes first
// This is done specifically for consolidation where we want to make sure we schedule to initialized nodes
// before we attempt to schedule un-initialized ones
sort.SliceStable(s.existingNodes, func(i, j int) bool {
if s.existingNodes[i].Initialized() && !s.existingNodes[j].Initialized() {
return true
}
if !s.existingNodes[i].Initialized() && s.existingNodes[j].Initialized() {
return false
}
return s.existingNodes[i].Name() < s.existingNodes[j].Name()
})
}
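// getDaemonOverhead computes, per machine template, the combined resource requests of the daemonset pods that would
// schedule against that template (i.e. pods that tolerate its taints and are compatible with its requirements).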
func getDaemonOverhead(nodeTemplates []*MachineTemplate, daemonSetPods []*v1.Pod) map[*MachineTemplate]v1.ResourceList {
overhead := map[*MachineTemplate]v1.ResourceList{}
for _, nodeTemplate := range nodeTemplates {
var daemons []*v1.Pod
for _, p := range daemonSetPods {
if err := nodeTemplate.Taints.Tolerates(p); err != nil {
continue
}
if err := nodeTemplate.Requirements.Compatible(scheduling.NewPodRequirements(p)); err != nil {
continue
}
daemons = append(daemons, p)
}
overhead[nodeTemplate] = resources.RequestsForPods(daemons...)
}
return overhead
}
// subtractMax returns the remaining resources after subtracting the max resource quantity per instance type. To avoid
// overshooting, we need to pessimistically assume that if e.g. we request a 2, 4 or 8 CPU instance type,
// the 8 CPU instance type is all that will be available. This could cause a batch of pods to take multiple rounds
// to schedule.
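// For example (illustrative numbers): with 10 CPU remaining against the limit and a machine whose remaining instance
// type options have 2, 4, and 8 CPU, we subtract 8 and treat only 2 CPU as still remaining.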
func subtractMax(remaining v1.ResourceList, instanceTypes []*cloudprovider.InstanceType) v1.ResourceList {
// shouldn't occur, but to be safe
if len(instanceTypes) == 0 {
return remaining
}
var allInstanceResources []v1.ResourceList
for _, it := range instanceTypes {
allInstanceResources = append(allInstanceResources, it.Capacity)
}
result := v1.ResourceList{}
itResources := resources.MaxResources(allInstanceResources...)
for k, v := range remaining {
cp := v.DeepCopy()
cp.Sub(itResources[k])
result[k] = cp
}
return result
}
// filterByRemainingResources is used to filter out instance types that if launched would exceed the provisioner limits
func filterByRemainingResources(instanceTypes []*cloudprovider.InstanceType, remaining v1.ResourceList) []*cloudprovider.InstanceType {
var filtered []*cloudprovider.InstanceType
for _, it := range instanceTypes {
itResources := it.Capacity
viableInstance := true
for resourceName, remainingQuantity := range remaining {
// if the instance capacity is greater than the remaining quantity for this resource
if resources.Cmp(itResources[resourceName], remainingQuantity) > 0 {
viableInstance = false
}
}
if viableInstance {
filtered = append(filtered, it)
}
}
return filtered
}
| 368 |
karpenter-core | aws | Go | //go:build test_performance
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling_test
import (
"context"
"fmt"
"math"
"math/rand"
"os"
"runtime/pprof"
"testing"
"text/tabwriter"
"time"
"github.com/samber/lo"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/tools/record"
"k8s.io/utils/clock"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/test"
"go.uber.org/zap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/logging"
v1 "k8s.io/api/core/v1"
)
const MinPodsPerSec = 100.0
const PrintStats = false
//nolint:gosec
var r = rand.New(rand.NewSource(42))
func BenchmarkScheduling1(b *testing.B) {
benchmarkScheduler(b, 400, 1)
}
func BenchmarkScheduling50(b *testing.B) {
benchmarkScheduler(b, 400, 50)
}
func BenchmarkScheduling100(b *testing.B) {
benchmarkScheduler(b, 400, 100)
}
func BenchmarkScheduling500(b *testing.B) {
benchmarkScheduler(b, 400, 500)
}
func BenchmarkScheduling1000(b *testing.B) {
benchmarkScheduler(b, 400, 1000)
}
func BenchmarkScheduling2000(b *testing.B) {
benchmarkScheduler(b, 400, 2000)
}
func BenchmarkScheduling5000(b *testing.B) {
benchmarkScheduler(b, 400, 5000)
}
// TestSchedulingProfile is used to gather profiling metrics; benchmarking is primarily done with standard
// Go benchmark functions
// go test -tags=test_performance -run=SchedulingProfile
func TestSchedulingProfile(t *testing.T) {
tw := tabwriter.NewWriter(os.Stdout, 8, 8, 2, ' ', 0)
cpuf, err := os.Create("schedule.cpuprofile")
if err != nil {
t.Fatalf("error creating CPU profile: %s", err)
}
lo.Must0(pprof.StartCPUProfile(cpuf))
defer pprof.StopCPUProfile()
heapf, err := os.Create("schedule.heapprofile")
if err != nil {
t.Fatalf("error creating heap profile: %s", err)
}
defer lo.Must0(pprof.WriteHeapProfile(heapf))
totalPods := 0
totalNodes := 0
var totalTime time.Duration
for _, instanceCount := range []int{400} {
for _, podCount := range []int{10, 100, 500, 1000, 1500, 2000, 2500} {
start := time.Now()
res := testing.Benchmark(func(b *testing.B) { benchmarkScheduler(b, instanceCount, podCount) })
totalTime += time.Since(start) / time.Duration(res.N)
nodeCount := res.Extra["nodes"]
fmt.Fprintf(tw, "%d instances %d pods\t%d nodes\t%s per scheduling\t%s per pod\n", instanceCount, podCount, int(nodeCount), time.Duration(res.NsPerOp()), time.Duration(res.NsPerOp()/int64(podCount)))
totalPods += podCount
totalNodes += int(nodeCount)
}
}
fmt.Println("scheduled", totalPods, "against", totalNodes, "nodes in total in", totalTime, float64(totalPods)/totalTime.Seconds(), "pods/sec")
tw.Flush()
}
func benchmarkScheduler(b *testing.B, instanceCount, podCount int) {
// disable logging
ctx := logging.WithLogger(context.Background(), zap.NewNop().Sugar())
ctx = settings.ToContext(ctx, test.Settings())
provisioner = test.Provisioner(test.ProvisionerOptions{Limits: map[v1.ResourceName]resource.Quantity{}})
instanceTypes := fake.InstanceTypes(instanceCount)
cloudProvider = fake.NewCloudProvider()
cloudProvider.InstanceTypes = instanceTypes
scheduler := scheduling.NewScheduler(ctx, nil, []*scheduling.MachineTemplate{scheduling.NewMachineTemplate(provisioner)},
nil, state.NewCluster(&clock.RealClock{}, nil, cloudProvider), nil, &scheduling.Topology{},
map[string][]*cloudprovider.InstanceType{provisioner.Name: instanceTypes}, nil,
events.NewRecorder(&record.FakeRecorder{}),
scheduling.SchedulerOptions{})
pods := makeDiversePods(podCount)
b.ResetTimer()
// Pack benchmark
start := time.Now()
podsScheduledInRound1 := 0
nodesInRound1 := 0
for i := 0; i < b.N; i++ {
results, err := scheduler.Solve(ctx, pods)
if err != nil {
b.FailNow()
}
if i == 0 {
minPods := math.MaxInt64
maxPods := 0
var podCounts []int
for _, n := range results.NewMachines {
podCounts = append(podCounts, len(n.Pods))
podsScheduledInRound1 += len(n.Pods)
nodesInRound1 = len(results.NewMachines)
if len(n.Pods) > maxPods {
maxPods = len(n.Pods)
}
if len(n.Pods) < minPods {
minPods = len(n.Pods)
}
}
if PrintStats {
meanPodsPerNode := float64(podsScheduledInRound1) / float64(nodesInRound1)
variance := 0.0
for _, pc := range podCounts {
variance += math.Pow(float64(pc)-meanPodsPerNode, 2.0)
}
variance /= float64(nodesInRound1)
stddev := math.Sqrt(variance)
fmt.Printf("%d instance types %d pods resulted in %d nodes with pods per node min=%d max=%d mean=%f stddev=%f\n",
instanceCount, podCount, nodesInRound1, minPods, maxPods, meanPodsPerNode, stddev)
}
}
}
duration := time.Since(start)
podsPerSec := float64(len(pods)) / (duration.Seconds() / float64(b.N))
b.ReportMetric(podsPerSec, "pods/sec")
b.ReportMetric(float64(podsScheduledInRound1), "pods")
b.ReportMetric(float64(nodesInRound1), "nodes")
// we don't care if it takes a bit of time to schedule a few pods as there is some setup time required for sorting
// instance types, computing topologies, etc. We want to ensure that the larger batches of pods don't become too
// slow.
if len(pods) > 100 {
if podsPerSec < MinPodsPerSec {
b.Fatalf("scheduled %f pods/sec, expected at least %f", podsPerSec, MinPodsPerSec)
}
}
}
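// makeDiversePods builds a mix of generic, topology-spread, and pod-affinity pods (roughly a seventh of each
// variant), topping up the remainder with generic pods.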
func makeDiversePods(count int) []*v1.Pod {
var pods []*v1.Pod
pods = append(pods, makeGenericPods(count/7)...)
pods = append(pods, makeTopologySpreadPods(count/7, v1.LabelTopologyZone)...)
pods = append(pods, makeTopologySpreadPods(count/7, v1.LabelHostname)...)
pods = append(pods, makePodAffinityPods(count/7, v1.LabelHostname)...)
pods = append(pods, makePodAffinityPods(count/7, v1.LabelTopologyZone)...)
	// fill out the remainder with generic pods, since count may not be evenly divisible by the number of variants
nRemaining := count - len(pods)
pods = append(pods, makeGenericPods(nRemaining)...)
return pods
}
func makePodAffinityPods(count int, key string) []*v1.Pod {
var pods []*v1.Pod
for i := 0; i < count; i++ {
pods = append(pods, test.Pod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: randomAffinityLabels()},
PodRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{MatchLabels: randomAffinityLabels()},
TopologyKey: key,
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: randomCPU(),
v1.ResourceMemory: randomMemory(),
},
}}))
}
return pods
}
func makeTopologySpreadPods(count int, key string) []*v1.Pod {
var pods []*v1.Pod
for i := 0; i < count; i++ {
pods = append(pods, test.Pod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: randomLabels()},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: key,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchLabels: randomLabels(),
},
},
},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: randomCPU(),
v1.ResourceMemory: randomMemory(),
},
}}))
}
return pods
}
func makeGenericPods(count int) []*v1.Pod {
var pods []*v1.Pod
for i := 0; i < count; i++ {
pods = append(pods, test.Pod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: randomLabels()},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: randomCPU(),
v1.ResourceMemory: randomMemory(),
},
}}))
}
return pods
}
func randomAffinityLabels() map[string]string {
return map[string]string{
"my-affininity": randomLabelValue(),
}
}
func randomLabels() map[string]string {
return map[string]string{
"my-label": randomLabelValue(),
}
}
func randomLabelValue() string {
labelValues := []string{"a", "b", "c", "d", "e", "f", "g"}
return labelValues[r.Intn(len(labelValues))]
}
func randomMemory() resource.Quantity {
mem := []int{100, 256, 512, 1024, 2048, 4096}
return resource.MustParse(fmt.Sprintf("%dMi", mem[r.Intn(len(mem))]))
}
func randomCPU() resource.Quantity {
cpu := []int{100, 250, 500, 1000, 1500}
return resource.MustParse(fmt.Sprintf("%dm", cpu[r.Intn(len(cpu))]))
}
| 291 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//nolint:gosec
package scheduling_test
import (
"context"
"fmt"
"math"
"math/rand"
"testing"
"time"
"github.com/samber/lo"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
cloudproviderapi "k8s.io/cloud-provider/api"
clock "k8s.io/utils/clock/testing"
"knative.dev/pkg/ptr"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/provisioning"
"github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/controllers/state/informer"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
pscheduling "github.com/aws/karpenter-core/pkg/scheduling"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var provisioner *v1alpha5.Provisioner
var prov *provisioning.Provisioner
var env *test.Environment
var fakeClock *clock.FakeClock
var cluster *state.Cluster
var cloudProvider *fake.CloudProvider
var nodeStateController controller.Controller
var machineStateController controller.Controller
var podStateController controller.Controller
const csiProvider = "fake.csi.provider"
func TestScheduling(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controllers/Scheduling")
}
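// BeforeSuite wires up the shared fixtures for the suite: the test environment, a fake cloud provider and fake clock,
// cluster state with its machine/node/pod informer controllers, and the provisioner under test.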
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
instanceTypes, _ := cloudProvider.GetInstanceTypes(ctx, nil)
// set these on the cloud provider, so we can manipulate them if needed
cloudProvider.InstanceTypes = instanceTypes
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
machineStateController = informer.NewMachineController(env.Client, cluster)
nodeStateController = informer.NewNodeController(env.Client, cluster)
podStateController = informer.NewPodController(env.Client, cluster)
prov = provisioning.NewProvisioner(env.Client, env.KubernetesInterface.CoreV1(), events.NewRecorder(&record.FakeRecorder{}), cloudProvider, cluster)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
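// BeforeEach creates a fresh provisioner for each test and restores the fake cloud provider's default instance types and call history so state does not leak between tests.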
var _ = BeforeEach(func() {
provisioner = test.Provisioner(test.ProvisionerOptions{Requirements: []v1.NodeSelectorRequirement{{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.CapacityTypeSpot, v1alpha5.CapacityTypeOnDemand},
}}})
// reset instance types
newCP := fake.CloudProvider{}
cloudProvider.InstanceTypes, _ = newCP.GetInstanceTypes(context.Background(), nil)
cloudProvider.CreateCalls = nil
})
var _ = AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
cluster.Reset()
})
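// Custom Constraints covers how provisioner labels and requirements interact with pod node selectors, node affinity requirements, and preferences when deciding whether and where pods schedule.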
var _ = Describe("Custom Constraints", func() {
Context("Provisioner with Labels", func() {
It("should schedule unconstrained pods that don't have matching node selectors", func() {
provisioner.Spec.Labels = map[string]string{"test-key": "test-value"}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should not schedule pods that have conflicting node selectors", func() {
provisioner.Spec.Labels = map[string]string{"test-key": "test-value"}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{"test-key": "different-value"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule pods that have node selectors with undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{"test-key": "test-value"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that have matching requirements", func() {
provisioner.Spec.Labels = map[string]string{"test-key": "test-value"}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should not schedule pods that have conflicting requirements", func() {
provisioner.Spec.Labels = map[string]string{"test-key": "test-value"}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
})
Context("Well Known Labels", func() {
It("should use provisioner constraints", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should use node selectors", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should not schedule nodes with a hostname selector", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelHostname: "red-node"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule the pod if nodeselector unknown", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "unknown"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule if node selector outside of provisioner constraints", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule compatible requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should schedule compatible requirements with Operator=Gt", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: fake.IntegerInstanceLabelKey, Operator: v1.NodeSelectorOpGt, Values: []string{"8"},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(fake.IntegerInstanceLabelKey, "16"))
})
It("should schedule compatible requirements with Operator=Lt", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: fake.IntegerInstanceLabelKey, Operator: v1.NodeSelectorOpLt, Values: []string{"8"},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(fake.IntegerInstanceLabelKey, "2"))
})
It("should not schedule incompatible preferences and requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"unknown"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule compatible requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "unknown"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should not schedule incompatible preferences and requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule compatible preferences and requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "unknown"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should schedule incompatible preferences and requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"unknown"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule compatible preferences and requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-3"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should schedule incompatible preferences and requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule compatible node selectors, preferences and requirements", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"},
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should combine multidimensional node selectors, preferences and requirements", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeSelector: map[string]string{
v1.LabelTopologyZone: "test-zone-3",
v1.LabelInstanceTypeStable: "arm-instance-type",
},
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-3"}},
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"default-instance-type", "arm-instance-type"}},
},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"unknown"}},
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpNotIn, Values: []string{"unknown"}},
},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "arm-instance-type"))
})
})
Context("Constraints Validation", func() {
It("should not schedule pods that have node selectors with restricted labels", func() {
ExpectApplied(ctx, env.Client, provisioner)
for label := range v1alpha5.RestrictedLabels {
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
}
})
It("should not schedule pods that have node selectors with restricted domains", func() {
ExpectApplied(ctx, env.Client, provisioner)
for domain := range v1alpha5.RestrictedLabelDomains {
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
}
})
It("should schedule pods that have node selectors with label in restricted domains exceptions list", func() {
var requirements []v1.NodeSelectorRequirement
for domain := range v1alpha5.LabelDomainExceptions {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}})
}
provisioner.Spec.Requirements = requirements
ExpectApplied(ctx, env.Client, provisioner)
for domain := range v1alpha5.LabelDomainExceptions {
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value"))
}
})
It("should schedule pods that have node selectors with label in wellknown label list", func() {
schedulable := []*v1.Pod{
// Constrained by zone
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}}),
// Constrained by instanceType
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "default-instance-type"}}),
// Constrained by architecture
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "arm64"}}),
// Constrained by operatingSystem
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Linux)}}),
// Constrained by capacity type
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.LabelCapacityType: "spot"}}),
}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, schedulable...)
for _, pod := range schedulable {
ExpectScheduled(ctx, env.Client, pod)
}
})
})
Context("Scheduling Logic", func() {
It("should not schedule pods that have node selectors with In operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that have node selectors with NotIn operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).ToNot(HaveKeyWithValue("test-key", "test-value"))
})
It("should not schedule pods that have node selectors with Exists operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpExists},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that with DoesNotExists operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpDoesNotExist},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).ToNot(HaveKey("test-key"))
})
It("should schedule unconstrained pods that don't have matching node selectors", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should schedule pods that have node selectors with matching value and In operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should not schedule pods that have node selectors with matching value and NotIn operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule the pod with Exists operator and defined key", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpExists},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should not schedule the pod with DoesNotExists operator and defined key", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpDoesNotExist},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule pods that have node selectors with different value and In operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that have node selectors with different value and NotIn operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"another-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should schedule compatible pods to the same node", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}}),
test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"another-value"}},
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
node1 := ExpectScheduled(ctx, env.Client, pods[0])
node2 := ExpectScheduled(ctx, env.Client, pods[1])
Expect(node1.Labels).To(HaveKeyWithValue("test-key", "test-value"))
Expect(node2.Labels).To(HaveKeyWithValue("test-key", "test-value"))
Expect(node1.Name).To(Equal(node2.Name))
})
It("should schedule incompatible pods to the different node", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}}),
test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}},
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
node1 := ExpectScheduled(ctx, env.Client, pods[0])
node2 := ExpectScheduled(ctx, env.Client, pods[1])
Expect(node1.Labels).To(HaveKeyWithValue("test-key", "test-value"))
Expect(node2.Labels).To(HaveKeyWithValue("test-key", "another-value"))
Expect(node1.Name).ToNot(Equal(node2.Name))
})
It("Exists operator should not overwrite the existing value", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"non-existent-zone"}},
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpExists},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
})
Context("Well Known Labels", func() {
It("should use provisioner constraints", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should use node selectors", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should not schedule nodes with a hostname selector", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelHostname: "red-node"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule the pod if nodeselector unknown", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "unknown"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule if node selector outside of provisioner constraints", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule compatible requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should schedule compatible requirements with Operator=Gt", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: fake.IntegerInstanceLabelKey, Operator: v1.NodeSelectorOpGt, Values: []string{"8"},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(fake.IntegerInstanceLabelKey, "16"))
})
It("should schedule compatible requirements with Operator=Lt", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: fake.IntegerInstanceLabelKey, Operator: v1.NodeSelectorOpLt, Values: []string{"8"},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(fake.IntegerInstanceLabelKey, "2"))
})
It("should not schedule incompatible preferences and requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"unknown"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule compatible requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "unknown"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should not schedule incompatible preferences and requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule compatible preferences and requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "unknown"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should schedule incompatible preferences and requirements with Operator=In", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"unknown"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule compatible preferences and requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-3"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should schedule incompatible preferences and requirements with Operator=NotIn", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3", "unknown"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should schedule compatible node selectors, preferences and requirements", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"},
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should combine multidimensional node selectors, preferences and requirements", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeSelector: map[string]string{
v1.LabelTopologyZone: "test-zone-3",
v1.LabelInstanceTypeStable: "arm-instance-type",
},
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-3"}},
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"default-instance-type", "arm-instance-type"}},
},
NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"unknown"}},
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpNotIn, Values: []string{"unknown"}},
},
},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "arm-instance-type"))
})
})
Context("Constraints Validation", func() {
It("should not schedule pods that have node selectors with restricted labels", func() {
ExpectApplied(ctx, env.Client, provisioner)
for label := range v1alpha5.RestrictedLabels {
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
}
})
It("should not schedule pods that have node selectors with restricted domains", func() {
ExpectApplied(ctx, env.Client, provisioner)
for domain := range v1alpha5.RestrictedLabelDomains {
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
}
})
It("should schedule pods that have node selectors with label in restricted domains exceptions list", func() {
var requirements []v1.NodeSelectorRequirement
for domain := range v1alpha5.LabelDomainExceptions {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: domain + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}})
}
provisioner.Spec.Requirements = requirements
ExpectApplied(ctx, env.Client, provisioner)
for domain := range v1alpha5.LabelDomainExceptions {
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value"))
}
})
It("should schedule pods that have node selectors with label in wellknown label list", func() {
schedulable := []*v1.Pod{
// Constrained by zone
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}}),
// Constrained by instanceType
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "default-instance-type"}}),
// Constrained by architecture
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "arm64"}}),
// Constrained by operatingSystem
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Linux)}}),
// Constrained by capacity type
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1alpha5.LabelCapacityType: "spot"}}),
}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, schedulable...)
for _, pod := range schedulable {
ExpectScheduled(ctx, env.Client, pod)
}
})
})
Context("Scheduling Logic", func() {
It("should not schedule pods that have node selectors with In operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that have node selectors with NotIn operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).ToNot(HaveKeyWithValue("test-key", "test-value"))
})
It("should not schedule pods that have node selectors with Exists operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpExists},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that with DoesNotExists operator and undefined key", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpDoesNotExist},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).ToNot(HaveKey("test-key"))
})
It("should schedule unconstrained pods that don't have matching node selectors", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should schedule pods that have node selectors with matching value and In operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should not schedule pods that have node selectors with matching value and NotIn operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule the pod with Exists operator and defined key", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpExists},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should not schedule the pod with DoesNotExists operator and defined key", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpDoesNotExist},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should not schedule pods that have node selectors with different value and In operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should schedule pods that have node selectors with different value and NotIn operator", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"another-value"}},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue("test-key", "test-value"))
})
It("should schedule compatible pods to the same node", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}}),
test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"another-value"}},
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
node1 := ExpectScheduled(ctx, env.Client, pods[0])
node2 := ExpectScheduled(ctx, env.Client, pods[1])
Expect(node1.Labels).To(HaveKeyWithValue("test-key", "test-value"))
Expect(node2.Labels).To(HaveKeyWithValue("test-key", "test-value"))
Expect(node1.Name).To(Equal(node2.Name))
})
It("should schedule incompatible pods to the different node", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value", "another-value"}}}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"test-value"}},
}}),
test.UnschedulablePod(
test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
{Key: "test-key", Operator: v1.NodeSelectorOpIn, Values: []string{"another-value"}},
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
node1 := ExpectScheduled(ctx, env.Client, pods[0])
node2 := ExpectScheduled(ctx, env.Client, pods[1])
Expect(node1.Labels).To(HaveKeyWithValue("test-key", "test-value"))
Expect(node2.Labels).To(HaveKeyWithValue("test-key", "another-value"))
Expect(node1.Name).ToNot(Equal(node2.Name))
})
It("Exists operator should not overwrite the existing value", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"non-existent-zone"}},
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpExists},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
})
})
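// Preferential Fallback exercises how the scheduler progressively relaxes node affinity when a pod cannot otherwise be scheduled:
// preferred terms can be dropped in favor of lighter-weight (or no) preferences, while the final required term is never relaxed.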
var _ = Describe("Preferential Fallback", func() {
Context("Required", func() {
It("should not relax the final term", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}},
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"default-instance-type"}},
}
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}}, // Should not be relaxed
}},
}}}}
// Don't relax
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should relax multiple terms", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}},
}},
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}, // OR operator, never get to this one
}},
}}}}
// Success
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1"))
})
})
Context("Preferred", func() {
It("should relax all terms", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
},
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
}},
},
}}}
// Success
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should relax to use lighter weights", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 100, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}},
}},
},
{
Weight: 50, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}},
}},
},
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{ // OR operator, never get to this one
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}},
}},
},
}}}
// Success
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-2"))
})
It("should schedule even if preference is conflicting with requirement", func() {
pod := test.UnschedulablePod()
pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"test-zone-3"}},
}},
},
},
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{MatchExpressions: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}}, // Should not be relaxed
}},
}},
}}
// Success
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-3"))
})
It("should schedule even if preference requirements are conflicting", func() {
pod := test.UnschedulablePod(test.PodOptions{NodePreferences: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"invalid"}},
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpNotIn, Values: []string{"invalid"}},
}})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
})
})
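// Instance Type Compatibility verifies that launched instance types satisfy the intersection of provisioner requirements and pod constraints
// (architecture, operating system, instance type, zone, and resource requests), launching separate nodes when pods are incompatible with each other.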
var _ = Describe("Instance Type Compatibility", func() {
It("should not schedule if requesting more resources than any instance type has", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("512"),
}},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should launch pods with different archs on different instances", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64, v1alpha5.ArchitectureAmd64},
}}
nodeNames := sets.NewString()
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelArchStable: v1alpha5.ArchitectureAmd64},
}),
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelArchStable: v1alpha5.ArchitectureArm64},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
nodeNames.Insert(node.Name)
}
Expect(nodeNames.Len()).To(Equal(2))
})
It("should exclude instance types that are not supported by the pod constraints (node affinity/instance type)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"arm-instance-type"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
// arm instance type conflicts with the provisioner limitation of amd64 only
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should exclude instance types that are not supported by the pod constraints (node affinity/operating system)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelOSStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"ios"},
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
		// there's an instance type with an OS of "ios", but it has an arm processor, so the provisioner
		// requirements will exclude it
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should exclude instance types that are not supported by the provider constraints (arch)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureAmd64},
}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("14")}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
// only the ARM instance has enough CPU, but it's not allowed per the provisioner
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should launch pods with different operating systems on different instances", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64, v1alpha5.ArchitectureAmd64},
}}
nodeNames := sets.NewString()
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Linux)},
}),
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelOSStable: string(v1.Windows)},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
nodeNames.Insert(node.Name)
}
Expect(nodeNames.Len()).To(Equal(2))
})
It("should launch pods with different instance type node selectors on different instances", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64, v1alpha5.ArchitectureAmd64},
}}
nodeNames := sets.NewString()
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelInstanceType: "small-instance-type"},
}),
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "default-instance-type"},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
nodeNames.Insert(node.Name)
}
Expect(nodeNames.Len()).To(Equal(2))
})
It("should launch pods with different zone selectors on different instances", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{v1alpha5.ArchitectureArm64, v1alpha5.ArchitectureAmd64},
}}
nodeNames := sets.NewString()
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"},
}),
test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
nodeNames.Insert(node.Name)
}
Expect(nodeNames.Len()).To(Equal(2))
})
It("should launch pods with resources that aren't on any single instance type on different instances", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
const fakeGPU1 = "karpenter.sh/super-great-gpu"
const fakeGPU2 = "karpenter.sh/even-better-gpu"
cloudProvider.InstanceTypes[0].Capacity[fakeGPU1] = resource.MustParse("25")
cloudProvider.InstanceTypes[1].Capacity[fakeGPU2] = resource.MustParse("25")
nodeNames := sets.NewString()
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{fakeGPU1: resource.MustParse("1")},
},
}),
// Should pack onto a different instance since no instance type has both GPUs
test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{fakeGPU2: resource.MustParse("1")},
},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
nodeNames.Insert(node.Name)
}
Expect(nodeNames.Len()).To(Equal(2))
})
It("should fail to schedule a pod with resources requests that aren't on a single instance type", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
const fakeGPU1 = "karpenter.sh/super-great-gpu"
const fakeGPU2 = "karpenter.sh/even-better-gpu"
cloudProvider.InstanceTypes[0].Capacity[fakeGPU1] = resource.MustParse("25")
cloudProvider.InstanceTypes[1].Capacity[fakeGPU2] = resource.MustParse("25")
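		// each fake GPU resource exists on a different instance type, so no single instance type can satisfy a pod that requests both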
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Limits: v1.ResourceList{
fakeGPU1: resource.MustParse("1"),
fakeGPU2: resource.MustParse("1")},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
Context("Provider Specific Labels", func() {
It("should filter instance types that match labels", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{fake.LabelInstanceSize: "large"}}),
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{fake.LabelInstanceSize: "small"}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
node := ExpectScheduled(ctx, env.Client, pods[0])
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "fake-it-4"))
node = ExpectScheduled(ctx, env.Client, pods[1])
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "fake-it-0"))
})
It("should not schedule with incompatible labels", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{
fake.LabelInstanceSize: "large",
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}}),
test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{
fake.LabelInstanceSize: "small",
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[4].Name,
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
ExpectNotScheduled(ctx, env.Client, pods[0])
ExpectNotScheduled(ctx, env.Client, pods[1])
})
It("should schedule optional labels", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
// Only some instance types have this key
{Key: fake.ExoticInstanceLabelKey, Operator: v1.NodeSelectorOpExists},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).To(HaveKey(fake.ExoticInstanceLabelKey))
Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, cloudProvider.InstanceTypes[4].Name))
})
It("should schedule without optional labels if disallowed", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
ExpectApplied(ctx, env.Client, test.Provisioner())
pod := test.UnschedulablePod(test.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{
// Only some instance types have this key
{Key: fake.ExoticInstanceLabelKey, Operator: v1.NodeSelectorOpDoesNotExist},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels).ToNot(HaveKey(fake.ExoticInstanceLabelKey))
})
})
})
var _ = Describe("Binpacking", func() {
It("should schedule a small pod on the smallest instance", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("100M"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))
})
It("should schedule a small pod on the smallest possible instance type", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("2000M"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))
})
It("should schedule multiple small pods on the smallest possible instance type", func() {
opts := test.PodOptions{
Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("10M"),
},
}}
pods := test.Pods(5, opts)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := sets.NewString()
for _, p := range pods {
node := ExpectScheduled(ctx, env.Client, p)
nodeNames.Insert(node.Name)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))
}
Expect(nodeNames).To(HaveLen(1))
})
It("should create new nodes when a node is at capacity", func() {
opts := test.PodOptions{
NodeSelector: map[string]string{v1.LabelArchStable: "amd64"},
Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1.8G"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
pods := test.Pods(40, opts)
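		// each pod requests 1.8G of memory; per the expectations below, two such pods fit on a
		// default-instance-type node, so the 40 pods should land on 20 nodes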
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := sets.NewString()
for _, p := range pods {
node := ExpectScheduled(ctx, env.Client, p)
nodeNames.Insert(node.Name)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("default-instance-type"))
}
Expect(nodeNames).To(HaveLen(20))
})
It("should pack small and large pods together", func() {
largeOpts := test.PodOptions{
NodeSelector: map[string]string{v1.LabelArchStable: "amd64"},
Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1.8G"),
},
}}
smallOpts := test.PodOptions{
NodeSelector: map[string]string{v1.LabelArchStable: "amd64"},
Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("400M"),
},
}}
		// Two large pods are all that will fit on the default-instance type (the largest instance type), which will create
		// twenty nodes. This leaves just enough room on each of those nodes for one additional small pod per node, so we
		// should only end up with 20 nodes total.
provPods := append(test.Pods(40, largeOpts), test.Pods(20, smallOpts)...)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, provPods...)
nodeNames := sets.NewString()
for _, p := range provPods {
node := ExpectScheduled(ctx, env.Client, p)
nodeNames.Insert(node.Name)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("default-instance-type"))
}
Expect(nodeNames).To(HaveLen(20))
})
It("should pack newNodes tightly", func() {
cloudProvider.InstanceTypes = fake.InstanceTypes(5)
var nodes []*v1.Node
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4.5")},
},
}),
test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
nodes = append(nodes, node)
}
Expect(nodes).To(HaveLen(2))
// the first pod consumes nearly all CPU of the largest instance type with no room for the second pod, the
// second pod is much smaller in terms of resources and should get a smaller node
Expect(nodes[0].Labels[v1.LabelInstanceTypeStable]).ToNot(Equal(nodes[1].Labels[v1.LabelInstanceTypeStable]))
})
It("should handle zero-quantity resource requests", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{"foo.com/weird-resources": resource.MustParse("0")},
Limits: v1.ResourceList{"foo.com/weird-resources": resource.MustParse("0")},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
// requesting a resource of quantity zero of a type unsupported by any instance is fine
ExpectScheduled(ctx, env.Client, pod)
})
It("should not schedule pods that exceed every instance type's capacity", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("2Ti"),
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should create new newNodes when a node is at capacity due to pod limits per node", func() {
opts := test.PodOptions{
NodeSelector: map[string]string{v1.LabelArchStable: "amd64"},
Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1m"),
v1.ResourceCPU: resource.MustParse("1m"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
pods := test.Pods(25, opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := sets.NewString()
		// all of the test instance types support 5 pods each, so we should use 5 nodes of the smallest instance type for our 25 pods
for _, p := range pods {
node := ExpectScheduled(ctx, env.Client, p)
nodeNames.Insert(node.Name)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("small-instance-type"))
}
Expect(nodeNames).To(HaveLen(5))
})
It("should take into account initContainer resource requests when binpacking", func() {
ExpectApplied(ctx, env.Client, provisioner)
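		// the effective pod request is the max of the init container and main container requests (2 CPU / 1Gi here),
		// so binpacking must size the node for the init container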
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceCPU: resource.MustParse("1"),
},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceCPU: resource.MustParse("2"),
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("default-instance-type"))
})
It("should not schedule pods when initContainer resource requests are greater than available instance types", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Gi"),
v1.ResourceCPU: resource.MustParse("1"),
},
},
InitImage: "pause",
InitResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceMemory: resource.MustParse("1Ti"),
v1.ResourceCPU: resource.MustParse("2"),
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should select for valid instance types, regardless of price", func() {
		// capacity sizes and prices don't correlate here; regardless, we should filter and see that all three instance types
		// are valid before preferring the cheapest one, 'large'
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "medium",
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 3.00,
Available: true,
},
},
}),
fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "small",
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 2.00,
Available: true,
},
},
}),
fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "large",
Resources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
Offerings: []cloudprovider.Offering{
{
CapacityType: v1alpha5.CapacityTypeOnDemand,
Zone: "test-zone-1a",
Price: 1.00,
Available: true,
},
},
}),
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1m"),
v1.ResourceMemory: resource.MustParse("1Mi"),
},
}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
// large is the cheapest, so we should pick it, but the other two types are also valid options
Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("large"))
// all three options should be passed to the cloud provider
possibleInstanceType := sets.NewString(pscheduling.NewNodeSelectorRequirements(cloudProvider.CreateCalls[0].Spec.Requirements...).Get(v1.LabelInstanceTypeStable).Values()...)
Expect(possibleInstanceType).To(Equal(sets.NewString("small", "medium", "large")))
})
})
var _ = Describe("In-Flight Nodes", func() {
It("should not launch a second node if there is an in-flight node that can support the pod", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
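		// reconciling cluster state registers node1 as in-flight capacity, so the second pod should pack onto it rather than trigger a new launch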
secondPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).To(Equal(node2.Name))
})
It("should not launch a second node if there is an in-flight node that can support the pod (node selectors)", func() {
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2"},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
// the node gets created in test-zone-2
secondPod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1", "test-zone-2"},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
// test-zone-2 is in the intersection of their node selectors and the node has capacity, so we shouldn't create a new node
node2 := ExpectScheduled(ctx, env.Client, secondPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
Expect(node1.Name).To(Equal(node2.Name))
		// the existing node is in test-zone-2
thirdPod := test.UnschedulablePod(test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
},
NodeRequirements: []v1.NodeSelectorRequirement{{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1", "test-zone-3"},
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, thirdPod)
// node is in test-zone-2, so this pod needs a new node
node3 := ExpectScheduled(ctx, env.Client, thirdPod)
Expect(node1.Name).ToNot(Equal(node3.Name))
})
It("should launch a second node if a pod won't fit on the existingNodes node", func() {
ExpectApplied(ctx, env.Client, provisioner)
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1001m"),
},
}}
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
// the node will have 2000m CPU, so these two pods can't both fit on it
opts.ResourceRequirements.Limits[v1.ResourceCPU] = resource.MustParse("1")
secondPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).ToNot(Equal(node2.Name))
})
It("should launch a second node if a pod isn't compatible with the existingNodes node (node selector)", func() {
ExpectApplied(ctx, env.Client, provisioner)
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
}}
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
secondPod := test.UnschedulablePod(test.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: "arm64"}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).ToNot(Equal(node2.Name))
})
It("should launch a second node if an in-flight node is terminating", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
bindings := ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
ExpectScheduled(ctx, env.Client, initialPod)
// delete the node/machine
machine1 := bindings.Get(initialPod).Machine
node1 := bindings.Get(initialPod).Node
machine1.Finalizers = nil
node1.Finalizers = nil
ExpectApplied(ctx, env.Client, machine1, node1)
ExpectDeleted(ctx, env.Client, machine1, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(machine1))
secondPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).ToNot(Equal(node2.Name))
})
Context("Ordering Existing Machines", func() {
It("should order initialized nodes for scheduling un-initialized nodes", func() {
ExpectApplied(ctx, env.Client, provisioner)
var machines []*v1alpha5.Machine
var nodes []*v1.Node
for i := 0; i < 100; i++ {
m := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, m)
m, n := ExpectMachineDeployed(ctx, env.Client, cluster, cloudProvider, m)
machines = append(machines, m)
nodes = append(nodes, n)
}
// Make one of the nodes and machines initialized
elem := rand.Intn(100) //nolint:gosec
ExpectMakeMachinesInitialized(ctx, env.Client, machines[elem])
ExpectMakeNodesInitialized(ctx, env.Client, nodes[elem])
ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(machines[elem]))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(nodes[elem]))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
scheduledNode := ExpectScheduled(ctx, env.Client, pod)
			// Expect that the pod was scheduled onto the one node that was marked initialized
Expect(scheduledNode.Name).To(Equal(nodes[elem].Name))
})
It("should order initialized nodes for scheduling un-initialized nodes when all other nodes are inflight", func() {
ExpectApplied(ctx, env.Client, provisioner)
var machines []*v1alpha5.Machine
var node *v1.Node
elem := rand.Intn(100) // The machine/node that will be marked as initialized
for i := 0; i < 100; i++ {
m := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
},
},
})
ExpectApplied(ctx, env.Client, m)
if i == elem {
m, node = ExpectMachineDeployed(ctx, env.Client, cluster, cloudProvider, m)
} else {
var err error
m, err = ExpectMachineDeployedNoNode(ctx, env.Client, cluster, cloudProvider, m)
Expect(err).ToNot(HaveOccurred())
}
machines = append(machines, m)
}
// Make one of the nodes and machines initialized
ExpectMakeMachinesInitialized(ctx, env.Client, machines[elem])
ExpectMakeNodesInitialized(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(machines[elem]))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
scheduledNode := ExpectScheduled(ctx, env.Client, pod)
			// Expect that the scheduled node is the initialized node
Expect(scheduledNode.Name).To(Equal(node.Name))
})
})
Context("Topology", func() {
It("should balance pods across zones with in-flight newNodes", func() {
labels := map[string]string{"foo": "bar"}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
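			// with MaxSkew of 1 across the three test zones, four pods should spread 1/1/2, as asserted below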
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 2))
			// reconcile our nodes with the cluster state so they'll show up as in-flight
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
for _, node := range nodeList.Items {
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKey{Name: node.Name})
}
firstRoundNumNodes := len(nodeList.Items)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 3, 3))
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
			// shouldn't create any new nodes as the in-flight ones can support the pods
Expect(nodeList.Items).To(HaveLen(firstRoundNumNodes))
})
It("should balance pods across hostnames with in-flight newNodes", func() {
labels := map[string]string{"foo": "bar"}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 1, 1))
			// reconcile our nodes with the cluster state so they'll show up as in-flight
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
for _, node := range nodeList.Items {
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKey{Name: node.Name})
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 5)...,
)
			// we prefer to launch new nodes to satisfy the topology spread even though we could technically schedule against existing nodes
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 1, 1, 1, 1, 1, 1, 1))
})
})
Context("Taints", func() {
It("should assume pod will schedule to a tainted node with no taints", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("8"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
// delete the pod so that the node is empty
ExpectDeleted(ctx, env.Client, initialPod)
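			// clear the remaining taints so the now-empty node is a viable target for the next pod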
node1.Spec.Taints = nil
ExpectApplied(ctx, env.Client, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
secondPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).To(Equal(node2.Name))
})
It("should not assume pod will schedule to a tainted node", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("8"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
bindings := ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
ExpectScheduled(ctx, env.Client, initialPod)
machine1 := bindings.Get(initialPod).Machine
node1 := bindings.Get(initialPod).Node
machine1.StatusConditions().MarkTrue(v1alpha5.MachineInitialized)
// delete the pod so that the node is empty
ExpectDeleted(ctx, env.Client, initialPod)
// and taint it
node1.Spec.Taints = append(node1.Spec.Taints, v1.Taint{
Key: "foo.com/taint",
Value: "tainted",
Effect: v1.TaintEffectNoSchedule,
})
ExpectApplied(ctx, env.Client, machine1, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
secondPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).ToNot(Equal(node2.Name))
})
It("should assume pod will schedule to a tainted node with a custom startup taint", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("8"),
},
}}
provisioner.Spec.StartupTaints = append(provisioner.Spec.StartupTaints, v1.Taint{
Key: "foo.com/taint",
Value: "tainted",
Effect: v1.TaintEffectNoSchedule,
})
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
// delete the pod so that the node is empty
ExpectDeleted(ctx, env.Client, initialPod)
// startup taint + node not ready taint = 2
Expect(node1.Spec.Taints).To(HaveLen(2))
Expect(node1.Spec.Taints).To(ContainElement(v1.Taint{
Key: "foo.com/taint",
Value: "tainted",
Effect: v1.TaintEffectNoSchedule,
}))
ExpectApplied(ctx, env.Client, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
secondPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).To(Equal(node2.Name))
})
It("should not assume pod will schedule to a node with startup taints after initialization", func() {
startupTaint := v1.Taint{Key: "ignore-me", Value: "nothing-to-see-here", Effect: v1.TaintEffectNoSchedule}
provisioner.Spec.StartupTaints = []v1.Taint{startupTaint}
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod()
bindings := ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
ExpectScheduled(ctx, env.Client, initialPod)
// delete the pod so that the node is empty
ExpectDeleted(ctx, env.Client, initialPod)
			// Mark it initialized, which only occurs once the startup taint has been removed, and re-apply only the startup taint.
// We also need to add resource capacity as after initialization we assume that kubelet has recorded them.
machine1 := bindings.Get(initialPod).Machine
node1 := bindings.Get(initialPod).Node
machine1.StatusConditions().MarkTrue(v1alpha5.MachineInitialized)
node1.Spec.Taints = []v1.Taint{startupTaint}
node1.Status.Capacity = v1.ResourceList{v1.ResourcePods: resource.MustParse("10")}
ExpectApplied(ctx, env.Client, machine1, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
			// we should launch a new node since the startup taint is present again even though it was removed at some point
secondPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).ToNot(Equal(node2.Name))
})
It("should consider a tainted NotReady node as in-flight even if initialized", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{v1.ResourceCPU: resource.MustParse("10m")},
}}
ExpectApplied(ctx, env.Client, provisioner)
// Schedule to New Machine
pod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node1 := ExpectScheduled(ctx, env.Client, pod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
// Mark Initialized
node1.Labels[v1alpha5.LabelNodeInitialized] = "true"
node1.Spec.Taints = []v1.Taint{
{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule},
{Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoSchedule},
{Key: cloudproviderapi.TaintExternalCloudProvider, Effect: v1.TaintEffectNoSchedule, Value: "true"},
}
ExpectApplied(ctx, env.Client, node1)
// Schedule to In Flight Machine
pod = test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node2 := ExpectScheduled(ctx, env.Client, pod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node2))
Expect(node1.Name).To(Equal(node2.Name))
})
})
Context("Daemonsets", func() {
It("should track daemonset usage separately so we know how many DS resources are remaining to be scheduled", func() {
ds := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
)
ExpectApplied(ctx, env.Client, provisioner, ds)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(ds), ds)).To(Succeed())
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("8"),
},
}}
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
// create our daemonset pod and manually bind it to the node
dsPod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi"),
}},
})
dsPod.OwnerReferences = append(dsPod.OwnerReferences, metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: ds.Name,
UID: ds.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
})
// delete the pod so that the node is empty
ExpectDeleted(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectApplied(ctx, env.Client, provisioner, dsPod)
cluster.ForEachNode(func(f *state.StateNode) bool {
dsRequests := f.DaemonSetRequests()
available := f.Available()
Expect(dsRequests.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 0))
				// no pods, so we have the full capacity (16 CPU - 100m overhead)
Expect(available.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 15.9))
return true
})
ExpectManualBinding(ctx, env.Client, dsPod, node1)
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(dsPod))
cluster.ForEachNode(func(f *state.StateNode) bool {
dsRequests := f.DaemonSetRequests()
available := f.Available()
Expect(dsRequests.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 1))
// only the DS pod is bound, so available is reduced by one and the DS requested is incremented by one
Expect(available.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 14.9))
return true
})
opts = test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("14.9"),
},
}}
			// this pod should schedule on the existing node as the daemonset pod has already bound, meaning that the
			// remaining daemonset resources should be zero, leaving 14.9 CPUs for the pod
secondPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
Expect(node1.Name).To(Equal(node2.Name))
})
It("should handle unexpected daemonset pods binding to the node", func() {
ds1 := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
NodeSelector: map[string]string{
"my-node-label": "value",
},
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
)
ds2 := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1m"),
}}}})
ExpectApplied(ctx, env.Client, provisioner, ds1, ds2)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(ds1), ds1)).To(Succeed())
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("8"),
},
}}
initialPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node1 := ExpectScheduled(ctx, env.Client, initialPod)
// this label appears on the node for some reason that Karpenter can't track
node1.Labels["my-node-label"] = "value"
ExpectApplied(ctx, env.Client, node1)
// create our daemonset pod and manually bind it to the node
dsPod := test.UnschedulablePod(test.PodOptions{
NodeSelector: map[string]string{
"my-node-label": "value",
},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi"),
}},
})
dsPod.OwnerReferences = append(dsPod.OwnerReferences, metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: ds1.Name,
UID: ds1.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
})
// delete the pod so that the node is empty
ExpectDeleted(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectApplied(ctx, env.Client, provisioner, dsPod)
cluster.ForEachNode(func(f *state.StateNode) bool {
dsRequests := f.DaemonSetRequests()
available := f.Available()
Expect(dsRequests.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 0))
// no pods, so we have the full (16 CPU - 100m overhead)
Expect(available.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 15.9))
return true
})
ExpectManualBinding(ctx, env.Client, dsPod, node1)
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(dsPod))
cluster.ForEachNode(func(f *state.StateNode) bool {
dsRequests := f.DaemonSetRequests()
available := f.Available()
Expect(dsRequests.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 1))
// only the DS pod is bound, so available is reduced by one and the DS requested is incremented by one
Expect(available.Cpu().AsApproximateFloat64()).To(BeNumerically("~", 14.9))
return true
})
opts = test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("15.5"),
},
}}
			// This pod should not schedule on the in-flight node as it requires more CPU than we have. This verifies
			// that we don't reintroduce a bug where more daemonset pods were scheduled than anticipated due to unexpected
			// labels appearing on the node, which caused us to compute a negative amount of resources remaining for
			// daemonsets and, in turn, to miscalculate the amount of resources that were free on the node.
secondPod := test.UnschedulablePod(opts)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
node2 := ExpectScheduled(ctx, env.Client, secondPod)
// must create a new node
Expect(node1.Name).ToNot(Equal(node2.Name))
})
})
	//nolint:gosec
It("should pack in-flight newNodes before launching new newNodes", func() {
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(fake.InstanceTypeOptions{
Name: "medium",
Resources: v1.ResourceList{
// enough CPU for four pods + a bit of overhead
v1.ResourceCPU: resource.MustParse("4.25"),
v1.ResourcePods: resource.MustParse("4"),
},
}),
}
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
},
}}
ExpectApplied(ctx, env.Client, provisioner)
		// schedule random sets of pods in multiple batches
for i := 0; i < 10; i++ {
initialPods := test.UnschedulablePods(opts, rand.Intn(10))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPods...)
for _, pod := range initialPods {
node := ExpectScheduled(ctx, env.Client, pod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
}
}
		// due to the in-flight node support, we should pack existing nodes before launching a new node. The end result
		// is that we should only have some spare capacity on our final node
nodesWithCPUFree := 0
cluster.ForEachNode(func(n *state.StateNode) bool {
available := n.Available()
if available.Cpu().AsApproximateFloat64() >= 1 {
nodesWithCPUFree++
}
return true
})
Expect(nodesWithCPUFree).To(BeNumerically("<=", 1))
})
It("should not launch a second node if there is an in-flight node that can support the pod (#2011)", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
}}
		// there was a bug in cluster state where we failed to identify the instance type resources when using a
		// MachineTemplateRef, so modify our provisioner to use a MachineTemplateRef and ensure that the second pod
		// schedules to the existing node
provisioner.Spec.Provider = nil
provisioner.Spec.ProviderRef = &v1alpha5.MachineTemplateRef{}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(opts)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, pod)
var nodes v1.NodeList
Expect(env.Client.List(ctx, &nodes)).To(Succeed())
Expect(nodes.Items).To(HaveLen(1))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(&nodes.Items[0]))
pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodScheduled, Reason: v1.PodReasonUnschedulable, Status: v1.ConditionFalse}}
ExpectApplied(ctx, env.Client, pod)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, pod)
Expect(env.Client.List(ctx, &nodes)).To(Succeed())
// shouldn't create a second node
Expect(nodes.Items).To(HaveLen(1))
})
})
var _ = Describe("No Pre-Binding", func() {
It("should not bind pods to newNodes", func() {
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
},
}}
var nodeList v1.NodeList
		// shouldn't have any nodes yet
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(0))
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
ExpectNotScheduled(ctx, env.Client, initialPod)
// should launch a single node
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(1))
node1 := &nodeList.Items[0]
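		// reconcile the launched node into cluster state so the next provisioning pass sees it as in-flight capacity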
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
secondPod := test.UnschedulablePod(opts)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
ExpectNotScheduled(ctx, env.Client, secondPod)
		// shouldn't create a second node as the pod can bind to the existing node
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(1))
})
It("should handle resource zeroing of extended resources by kubelet", func() {
// Issue #1459
opts := test.PodOptions{ResourceRequirements: v1.ResourceRequirements{
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("10m"),
fake.ResourceGPUVendorA: resource.MustParse("1"),
},
}}
var nodeList v1.NodeList
		// shouldn't have any nodes yet
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(0))
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod(opts)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
ExpectNotScheduled(ctx, env.Client, initialPod)
// should launch a single node
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(1))
node1 := &nodeList.Items[0]
// simulate kubelet zeroing out the extended resources on the node at startup
node1.Status.Capacity = map[v1.ResourceName]resource.Quantity{
fake.ResourceGPUVendorA: resource.MustParse("0"),
}
node1.Status.Allocatable = map[v1.ResourceName]resource.Quantity{
fake.ResourceGPUVendorB: resource.MustParse("0"),
}
ExpectApplied(ctx, env.Client, node1)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
secondPod := test.UnschedulablePod(opts)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, secondPod)
ExpectNotScheduled(ctx, env.Client, secondPod)
		// shouldn't create a second node as the pod can bind to the existing node
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(1))
})
It("should respect self pod affinity without pod binding (zone)", func() {
// Issue #1975
affLabels := map[string]string{"security": "s2"}
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}},
}, 2)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, pods[0])
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
for i := range nodeList.Items {
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(&nodeList.Items[i]))
}
// the second pod can schedule against the in-flight node, but for that to work we need to be careful
// in how we fulfill the self-affinity by taking the existing node's domain as a preference over any
// random viable domain
ExpectProvisionedNoBinding(ctx, env.Client, cluster, cloudProvider, prov, pods[1])
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
Expect(nodeList.Items).To(HaveLen(1))
})
})
var _ = Describe("VolumeUsage", func() {
It("should launch multiple newNodes if required due to volume limits", func() {
const csiProvider = "fake.csi.provider"
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(
fake.InstanceTypeOptions{
Name: "instance-type",
Resources: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1024"),
v1.ResourcePods: resource.MustParse("1024"),
},
}),
}
provisioner.Spec.Limits = nil
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
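		// register a CSINode whose driver allows 10 volumes per node; each pod below mounts two unique PVCs,
		// so at most 5 of them fit on a single node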
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: node.Name,
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{
{
Name: csiProvider,
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(10),
},
},
},
},
}
ExpectApplied(ctx, env.Client, csiNode)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-storage-class"},
Provisioner: ptr.String(csiProvider),
Zones: []string{"test-zone-1"}})
ExpectApplied(ctx, env.Client, sc)
var pods []*v1.Pod
for i := 0; i < 6; i++ {
pvcA := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
StorageClassName: ptr.String("my-storage-class"),
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("my-claim-a-%d", i)},
})
pvcB := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
StorageClassName: ptr.String("my-storage-class"),
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("my-claim-b-%d", i)},
})
ExpectApplied(ctx, env.Client, pvcA, pvcB)
pods = append(pods, test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{pvcA.Name, pvcB.Name},
}))
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
// we need to create a new node as the in-flight one can only contain 5 pods due to the CSINode volume limit
Expect(nodeList.Items).To(HaveLen(2))
})
It("should launch a single node if all pods use the same PVC", func() {
const csiProvider = "fake.csi.provider"
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(
fake.InstanceTypeOptions{
Name: "instance-type",
Resources: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1024"),
v1.ResourcePods: resource.MustParse("1024"),
},
}),
}
provisioner.Spec.Limits = nil
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: node.Name,
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{
{
Name: csiProvider,
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(10),
},
},
},
},
}
ExpectApplied(ctx, env.Client, csiNode)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-storage-class"},
Provisioner: ptr.String(csiProvider),
Zones: []string{"test-zone-1"}})
ExpectApplied(ctx, env.Client, sc)
pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-volume"},
Zones: []string{"test-zone-1"}})
pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-claim"},
StorageClassName: ptr.String("my-storage-class"),
VolumeName: pv.Name,
})
ExpectApplied(ctx, env.Client, pv, pvc)
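		// every pod below references the same claim, so the volume should only count once against the CSINode volume limit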
var pods []*v1.Pod
for i := 0; i < 100; i++ {
pods = append(pods, test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name, pvc.Name},
}))
}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
		// 100 pods using the same PVC should all be schedulable on the same node
Expect(nodeList.Items).To(HaveLen(1))
})
It("should not fail for non-dynamic PVCs", func() {
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(
fake.InstanceTypeOptions{
Name: "instance-type",
Resources: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1024"),
v1.ResourcePods: resource.MustParse("1024"),
},
}),
}
provisioner.Spec.Limits = nil
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: node.Name,
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{
{
Name: csiProvider,
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(10),
},
},
},
},
}
ExpectApplied(ctx, env.Client, csiNode)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-storage-class"},
Provisioner: ptr.String(csiProvider),
Zones: []string{"test-zone-1"}})
ExpectApplied(ctx, env.Client, sc)
pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-volume"},
Driver: csiProvider,
Zones: []string{"test-zone-1"}})
pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-claim"},
VolumeName: pv.Name,
StorageClassName: ptr.String(""),
})
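		// the claim is pre-bound to a static PV and uses an empty storage class, i.e. it is not dynamically
		// provisioned; volume-limit accounting should tolerate this without failing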
ExpectApplied(ctx, env.Client, pv, pvc)
var pods []*v1.Pod
for i := 0; i < 5; i++ {
pods = append(pods, test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name, pvc.Name},
}))
}
ExpectApplied(ctx, env.Client, provisioner)
_ = ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
		// 5 pods using the same PVC should all be schedulable on the same node
Expect(nodeList.Items).To(HaveLen(1))
})
It("should not fail for NFS volumes", func() {
cloudProvider.InstanceTypes = []*cloudprovider.InstanceType{
fake.NewInstanceType(
fake.InstanceTypeOptions{
Name: "instance-type",
Resources: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1024"),
v1.ResourcePods: resource.MustParse("1024"),
},
}),
}
provisioner.Spec.Limits = nil
ExpectApplied(ctx, env.Client, provisioner)
initialPod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
pv := test.PersistentVolume(test.PersistentVolumeOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-volume"},
StorageClassName: "nfs",
Zones: []string{"test-zone-1"}})
pv.Spec.NFS = &v1.NFSVolumeSource{
Server: "fake.server",
Path: "/some/path",
}
pv.Spec.CSI = nil
pvc := test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
ObjectMeta: metav1.ObjectMeta{Name: "my-claim"},
VolumeName: pv.Name,
StorageClassName: ptr.String(""),
})
ExpectApplied(ctx, env.Client, pv, pvc)
var pods []*v1.Pod
for i := 0; i < 5; i++ {
pods = append(pods, test.UnschedulablePod(test.PodOptions{
PersistentVolumeClaims: []string{pvc.Name, pvc.Name},
}))
}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
			// 5 pods using the same PVC should all be schedulable on the same node
Expect(nodeList.Items).To(HaveLen(1))
})
It("should launch nodes for pods with ephemeral volume using the specified storage class name", func() {
// Launch an initial pod onto a node and register the CSI Node with a volume count limit of 1
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "my-storage-class",
},
Provisioner: ptr.String(csiProvider),
Zones: []string{"test-zone-1"}})
// Create another default storage class that shouldn't be used and has no associated limits
sc2 := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "default-storage-class",
Annotations: map[string]string{
pscheduling.IsDefaultStorageClassAnnotation: "true",
},
},
Provisioner: ptr.String("other-provider"),
Zones: []string{"test-zone-1"}})
initialPod := test.UnschedulablePod(test.PodOptions{})
// Pod has an ephemeral volume claim that has a specified storage class, so it should use the one specified
initialPod.Spec.Volumes = append(initialPod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: lo.ToPtr(sc.Name),
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, sc, sc2, initialPod)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: node.Name,
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{
{
Name: csiProvider,
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(1),
},
},
{
Name: "other-provider",
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(10),
},
},
},
},
}
ExpectApplied(ctx, env.Client, csiNode)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
pod := test.UnschedulablePod(test.PodOptions{})
// Pod has an ephemeral volume claim that has a specified storage class, so it should use the one specified
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: lo.ToPtr(sc.Name),
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, sc, provisioner, pod)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node2 := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Name).ToNot(Equal(node2.Name))
})
It("should launch nodes for pods with ephemeral volume using a default storage class", func() {
// Launch an initial pod onto a node and register the CSI Node with a volume count limit of 1
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "default-storage-class",
Annotations: map[string]string{
pscheduling.IsDefaultStorageClassAnnotation: "true",
},
},
Provisioner: ptr.String(csiProvider),
Zones: []string{"test-zone-1"}})
initialPod := test.UnschedulablePod(test.PodOptions{})
// Pod has an ephemeral volume claim that has NO storage class, so it should use the default one
initialPod.Spec.Volumes = append(initialPod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, sc, initialPod)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: node.Name,
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{
{
Name: csiProvider,
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(1),
},
},
},
},
}
ExpectApplied(ctx, env.Client, csiNode)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
pod := test.UnschedulablePod(test.PodOptions{})
// Pod has an ephemeral volume claim that has NO storage class, so it should use the default one
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, sc, provisioner, pod)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node2 := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Name).ToNot(Equal(node2.Name))
})
It("should launch nodes for pods with ephemeral volume using the newest storage class", func() {
// Launch an initial pod onto a node and register the CSI Node with a volume count limit of 1
sc := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "default-storage-class",
Annotations: map[string]string{
pscheduling.IsDefaultStorageClassAnnotation: "true",
},
},
Provisioner: ptr.String("other-provider"),
Zones: []string{"test-zone-1"}})
sc2 := test.StorageClass(test.StorageClassOptions{
ObjectMeta: metav1.ObjectMeta{
Name: "newer-default-storage-class",
Annotations: map[string]string{
pscheduling.IsDefaultStorageClassAnnotation: "true",
},
},
Provisioner: ptr.String(csiProvider),
Zones: []string{"test-zone-1"}})
ExpectApplied(ctx, env.Client, sc)
// Wait a few seconds to apply the second storage class to get a newer creationTimestamp
time.Sleep(time.Second * 2)
ExpectApplied(ctx, env.Client, sc2)
initialPod := test.UnschedulablePod(test.PodOptions{})
// Pod has an ephemeral volume claim that has NO storage class, so it should use the default one
initialPod.Spec.Volumes = append(initialPod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner, sc, initialPod)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, initialPod)
node := ExpectScheduled(ctx, env.Client, initialPod)
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: node.Name,
},
Spec: storagev1.CSINodeSpec{
Drivers: []storagev1.CSINodeDriver{
{
Name: csiProvider,
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(1),
},
},
{
Name: "other-provider",
NodeID: "fake-node-id",
Allocatable: &storagev1.VolumeNodeResources{
Count: ptr.Int32(10),
},
},
},
},
}
ExpectApplied(ctx, env.Client, csiNode)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
pod := test.UnschedulablePod(test.PodOptions{})
// Pod has an ephemeral volume claim that has NO storage class, so it should use the default one
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, sc, provisioner, pod)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node2 := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Name).ToNot(Equal(node2.Name))
})
It("should not launch nodes for pods with ephemeral volume using a non-existent storage classes", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{})
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "tmp-ephemeral",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: ptr.String("non-existent"),
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
},
})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
var nodeList v1.NodeList
Expect(env.Client.List(ctx, &nodeList)).To(Succeed())
			// no nodes should be created as the storage class doesn't exist
Expect(nodeList.Items).To(HaveLen(0))
})
})
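// ExpectMaxSkew computes, for the given constraint, the difference between the most and least populated domains across
// the cluster's nodes. An illustrative assertion (the concrete namespace and matcher here are hypothetical):
//
//	ExpectMaxSkew(ctx, env.Client, "default", &topology[0]).To(BeNumerically("<=", 1))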
// nolint:gocyclo
func ExpectMaxSkew(ctx context.Context, c client.Client, namespace string, constraint *v1.TopologySpreadConstraint) Assertion {
nodes := &v1.NodeList{}
ExpectWithOffset(1, c.List(ctx, nodes)).To(Succeed())
pods := &v1.PodList{}
ExpectWithOffset(1, c.List(ctx, pods, scheduling.TopologyListOptions(namespace, constraint.LabelSelector))).To(Succeed())
skew := map[string]int{}
nodeMap := map[string]*v1.Node{}
for i, node := range nodes.Items {
nodeMap[node.Name] = &nodes.Items[i]
}
for i, pod := range pods.Items {
if scheduling.IgnoredForTopology(&pods.Items[i]) {
continue
}
node := nodeMap[pod.Spec.NodeName]
if pod.Spec.NodeName == node.Name {
if constraint.TopologyKey == v1.LabelHostname {
skew[node.Name]++ // Check node name since hostname labels aren't applied
}
if constraint.TopologyKey == v1.LabelTopologyZone {
if key, ok := node.Labels[constraint.TopologyKey]; ok {
skew[key]++
}
}
if constraint.TopologyKey == v1alpha5.LabelCapacityType {
if key, ok := node.Labels[constraint.TopologyKey]; ok {
skew[key]++
}
}
}
}
var minCount = math.MaxInt
var maxCount = math.MinInt
for _, count := range skew {
if count < minCount {
minCount = count
}
if count > maxCount {
maxCount = count
}
}
return Expect(maxCount - minCount)
}
| 3,054 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
"math"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/scheduling"
"go.uber.org/multierr"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/utils/pod"
)
type Topology struct {
kubeClient client.Client
// Both the topologies and inverseTopologies are maps of the hash from TopologyGroup.Hash() to the topology group
// itself. This is used to allow us to store one topology group that tracks the topology of many pods instead of
// having a 1<->1 mapping between topology groups and pods owned/selected by that group.
topologies map[uint64]*TopologyGroup
	// Anti-affinity works both ways (if a zone has a pod foo with anti-affinity to a pod bar, we can't schedule bar to
	// that zone, even though bar has no anti-affinity terms on it). For this to work, we need to separately track the
	// topologies of pods with anti-affinity terms, so we can prevent scheduling the pods they have anti-affinity to
	// in some cases.
inverseTopologies map[uint64]*TopologyGroup
// The universe of domains by topology key
domains map[string]utilsets.String
// excludedPods are the pod UIDs of pods that are excluded from counting. This is used so we can simulate
// moving pods to prevent them from being double counted.
excludedPods utilsets.String
cluster *state.Cluster
}
func NewTopology(ctx context.Context, kubeClient client.Client, cluster *state.Cluster, domains map[string]utilsets.String, pods []*v1.Pod) (*Topology, error) {
t := &Topology{
kubeClient: kubeClient,
cluster: cluster,
domains: domains,
topologies: map[uint64]*TopologyGroup{},
inverseTopologies: map[uint64]*TopologyGroup{},
excludedPods: utilsets.NewString(),
}
// these are the pods that we intend to schedule, so if they are currently in the cluster we shouldn't count them for
// topology purposes
for _, p := range pods {
t.excludedPods.Insert(string(p.UID))
}
errs := t.updateInverseAffinities(ctx)
for i := range pods {
errs = multierr.Append(errs, t.Update(ctx, pods[i]))
}
if errs != nil {
return nil, errs
}
return t, nil
}
// Update unregisters the pod as the owner of all affinities and then creates any new topologies based on the pod spec,
// registering the pod as the owner of all associated affinities, new or old. This allows Update() to be called after
// relaxation of a preference to properly break the topology <-> owner relationship so that the preferred topology will
// no longer influence scheduling.
func (t *Topology) Update(ctx context.Context, p *v1.Pod) error {
for _, topology := range t.topologies {
topology.RemoveOwner(p.UID)
}
if pod.HasPodAntiAffinity(p) {
if err := t.updateInverseAntiAffinity(ctx, p, nil); err != nil {
return fmt.Errorf("updating inverse anti-affinities, %w", err)
}
}
topologies := t.newForTopologies(p)
affinities, err := t.newForAffinities(ctx, p)
if err != nil {
return fmt.Errorf("updating affinities, %w", err)
}
for _, tg := range append(topologies, affinities...) {
hash := tg.Hash()
// Avoid recomputing topology counts if we've already seen this group
if existing, ok := t.topologies[hash]; !ok {
if err := t.countDomains(ctx, tg); err != nil {
return err
}
t.topologies[hash] = tg
} else {
tg = existing
}
tg.AddOwner(p.UID)
}
return nil
}
// Record records the topology changes given that pod p schedules on a node with the given requirements
func (t *Topology) Record(p *v1.Pod, requirements scheduling.Requirements) {
// once we've committed to a domain, we record the usage in every topology that cares about it
for _, tc := range t.topologies {
if tc.Counts(p, requirements) {
domains := requirements.Get(tc.Key)
if tc.Type == TopologyTypePodAntiAffinity {
// for anti-affinity topologies we need to block out all possible domains that the pod could land in
tc.Record(domains.Values()...)
} else {
// but for affinity & topology spread, we can only record the domain if we know the specific domain we land in
if domains.Len() == 1 {
tc.Record(domains.Values()[0])
}
}
}
}
// for anti-affinities, we record where the pods could be, even if
// requirements haven't collapsed to a single value.
for _, tc := range t.inverseTopologies {
if tc.IsOwnedBy(p.UID) {
tc.Record(requirements.Get(tc.Key).Values()...)
}
}
}
// AddRequirements tightens the input requirements by adding additional requirements that are being enforced by topology
// spreads, affinities, anti-affinities or inverse anti-affinities. The nodeRequirements describe the node that we are
// currently considering placing the pod on. It returns these newly tightened requirements, or an error in the case of a
// set of requirements that cannot be satisfied.
func (t *Topology) AddRequirements(podRequirements, nodeRequirements scheduling.Requirements, p *v1.Pod) (scheduling.Requirements, error) {
requirements := scheduling.NewRequirements(nodeRequirements.Values()...)
for _, topology := range t.getMatchingTopologies(p, nodeRequirements) {
podDomains := scheduling.NewRequirement(topology.Key, v1.NodeSelectorOpExists)
if podRequirements.Has(topology.Key) {
podDomains = podRequirements.Get(topology.Key)
}
nodeDomains := scheduling.NewRequirement(topology.Key, v1.NodeSelectorOpExists)
if nodeRequirements.Has(topology.Key) {
nodeDomains = nodeRequirements.Get(topology.Key)
}
domains := topology.Get(p, podDomains, nodeDomains)
if domains.Len() == 0 {
return nil, fmt.Errorf("unsatisfiable topology constraint for %s, key=%s (counts = %v, podDomains = %v, nodeDomains = %v)", topology.Type, topology.Key, topology.domains, podDomains, nodeDomains)
}
requirements.Add(domains)
}
return requirements, nil
}
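// For example (illustrative values): if nodeRequirements currently allow topology.kubernetes.io/zone in
// [test-zone-1, test-zone-2, test-zone-3] and a matching zonal topology spread returns zone in [test-zone-2],
// the returned requirements are tightened to zone in [test-zone-2]; if a matching topology yields no viable
// domain at all, AddRequirements returns an unsatisfiable-constraint error instead.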
// Register is used to register a domain as available across topologies for the given topology key.
func (t *Topology) Register(topologyKey string, domain string) {
for _, topology := range t.topologies {
if topology.Key == topologyKey {
topology.Register(domain)
}
}
for _, topology := range t.inverseTopologies {
if topology.Key == topologyKey {
topology.Register(domain)
}
}
}
// updateInverseAffinities is used to identify pods with anti-affinity terms so we can track those topologies. We
// have to look at every pod in the cluster as there is no way to query for a pod with anti-affinity terms.
func (t *Topology) updateInverseAffinities(ctx context.Context) error {
var errs error
t.cluster.ForPodsWithAntiAffinity(func(pod *v1.Pod, node *v1.Node) bool {
// don't count the pod we are excluding
if t.excludedPods.Has(string(pod.UID)) {
return true
}
if err := t.updateInverseAntiAffinity(ctx, pod, node.Labels); err != nil {
errs = multierr.Append(errs, fmt.Errorf("tracking existing pod anti-affinity, %w", err))
}
return true
})
return errs
}
// updateInverseAntiAffinity is used to track topologies of inverse anti-affinities. Here the domains & counts track the
// pods with the anti-affinity.
func (t *Topology) updateInverseAntiAffinity(ctx context.Context, pod *v1.Pod, domains map[string]string) error {
	// We intentionally don't track inverse anti-affinity preferences. We're not
	// required to enforce them so it just adds complexity for very little
	// value. The problem with them comes from the relaxation process: the pod
	// we are relaxing is not the pod with the anti-affinity term.
for _, term := range pod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
namespaces, err := t.buildNamespaceList(ctx, pod.Namespace, term.Namespaces, term.NamespaceSelector)
if err != nil {
return err
}
tg := NewTopologyGroup(TopologyTypePodAntiAffinity, term.TopologyKey, pod, namespaces, term.LabelSelector, math.MaxInt32, nil, t.domains[term.TopologyKey])
hash := tg.Hash()
if existing, ok := t.inverseTopologies[hash]; !ok {
t.inverseTopologies[hash] = tg
} else {
tg = existing
}
if domain, ok := domains[tg.Key]; ok {
tg.Record(domain)
}
tg.AddOwner(pod.UID)
}
return nil
}
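// As an illustration (hypothetical labels/zones): if an existing pod in test-zone-1 declares required anti-affinity
// against pods labeled app=web, the inverse topology group records a non-zero count for test-zone-1. When a new
// app=web pod is later scheduled, getMatchingTopologies selects this inverse group and nextDomainAntiAffinity
// excludes test-zone-1, even though the new pod itself carries no anti-affinity terms.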
// countDomains initializes the topology group by registering any well-known domains and performing pod counts
// against the cluster for any existing pods.
func (t *Topology) countDomains(ctx context.Context, tg *TopologyGroup) error {
podList := &v1.PodList{}
// collect the pods from all the specified namespaces (don't see a way to query multiple namespaces
// simultaneously)
var pods []v1.Pod
for _, ns := range tg.namespaces.UnsortedList() {
if err := t.kubeClient.List(ctx, podList, TopologyListOptions(ns, tg.selector)); err != nil {
return fmt.Errorf("listing pods, %w", err)
}
pods = append(pods, podList.Items...)
}
for i, p := range pods {
if IgnoredForTopology(&pods[i]) {
continue
}
// pod is excluded for counting purposes
if t.excludedPods.Has(string(p.UID)) {
continue
}
node := &v1.Node{}
if err := t.kubeClient.Get(ctx, types.NamespacedName{Name: p.Spec.NodeName}, node); err != nil {
return fmt.Errorf("getting node %s, %w", p.Spec.NodeName, err)
}
domain, ok := node.Labels[tg.Key]
		// Kubelet sets the hostname label, but the node may not be ready yet so there is no label. We fall back and just
		// treat the node name as the label. It probably matches the hostname label in most cases, but even if not we at
		// least count the existence of the pods in some domain, even if not in the correct one. This is needed to handle
		// the case of pods with self-affinity only fulfilling that affinity if all domains are empty.
if !ok && tg.Key == v1.LabelHostname {
domain = node.Name
ok = true
}
if !ok {
continue // Don't include pods if node doesn't contain domain https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#conventions
}
// nodes may or may not be considered for counting purposes for topology spread constraints depending on if they
// are selected by the pod's node selectors and required node affinities. If these are unset, the node always counts.
if !tg.nodeFilter.Matches(node) {
continue
}
tg.Record(domain)
}
return nil
}
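// For instance (illustrative domain values): for a zonal spread group, each existing matching pod on a node labeled
// topology.kubernetes.io/zone=test-zone-1 increments the test-zone-1 count; a matching pod on a node without the zone
// label is skipped per the upstream conventions, unless the key is the hostname label, in which case the node name is
// used as a fallback domain.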
func (t *Topology) newForTopologies(p *v1.Pod) []*TopologyGroup {
var topologyGroups []*TopologyGroup
for _, cs := range p.Spec.TopologySpreadConstraints {
topologyGroups = append(topologyGroups, NewTopologyGroup(TopologyTypeSpread, cs.TopologyKey, p, utilsets.NewString(p.Namespace), cs.LabelSelector, cs.MaxSkew, cs.MinDomains, t.domains[cs.TopologyKey]))
}
return topologyGroups
}
// newForAffinities returns a list of topology groups that have been constructed based on the input pod and required/preferred affinity terms
func (t *Topology) newForAffinities(ctx context.Context, p *v1.Pod) ([]*TopologyGroup, error) {
var topologyGroups []*TopologyGroup
// No affinity defined
if p.Spec.Affinity == nil {
return topologyGroups, nil
}
affinityTerms := map[TopologyType][]v1.PodAffinityTerm{}
// include both soft and hard affinity terms
if p.Spec.Affinity.PodAffinity != nil {
affinityTerms[TopologyTypePodAffinity] = append(affinityTerms[TopologyTypePodAffinity], p.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution...)
for _, term := range p.Spec.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
affinityTerms[TopologyTypePodAffinity] = append(affinityTerms[TopologyTypePodAffinity], term.PodAffinityTerm)
}
}
// include both soft and hard antiaffinity terms
if p.Spec.Affinity.PodAntiAffinity != nil {
affinityTerms[TopologyTypePodAntiAffinity] = append(affinityTerms[TopologyTypePodAntiAffinity], p.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution...)
for _, term := range p.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
affinityTerms[TopologyTypePodAntiAffinity] = append(affinityTerms[TopologyTypePodAntiAffinity], term.PodAffinityTerm)
}
}
// build topologies
for topologyType, terms := range affinityTerms {
for _, term := range terms {
namespaces, err := t.buildNamespaceList(ctx, p.Namespace, term.Namespaces, term.NamespaceSelector)
if err != nil {
return nil, err
}
topologyGroups = append(topologyGroups, NewTopologyGroup(topologyType, term.TopologyKey, p, namespaces, term.LabelSelector, math.MaxInt32, nil, t.domains[term.TopologyKey]))
}
}
return topologyGroups, nil
}
// buildNamespaceList constructs a unique list of namespaces consisting of the pod's namespace, the optional list of
// namespaces, and those selected by the namespace selector
func (t *Topology) buildNamespaceList(ctx context.Context, namespace string, namespaces []string, selector *metav1.LabelSelector) (utilsets.String, error) {
if len(namespaces) == 0 && selector == nil {
return utilsets.NewString(namespace), nil
}
if selector == nil {
return utilsets.NewString(namespaces...), nil
}
var namespaceList v1.NamespaceList
labelSelector, err := metav1.LabelSelectorAsSelector(selector)
if err != nil {
return nil, fmt.Errorf("parsing selector, %w", err)
}
if err := t.kubeClient.List(ctx, &namespaceList, &client.ListOptions{LabelSelector: labelSelector}); err != nil {
return nil, fmt.Errorf("listing namespaces, %w", err)
}
selected := utilsets.NewString()
for _, namespace := range namespaceList.Items {
selected.Insert(namespace.Name)
}
selected.Insert(namespaces...)
return selected, nil
}
// getMatchingTopologies returns a sorted list of topologies that either control the scheduling of pod p, or for which
// the topology selects pod p and the scheduling of p affects the count per topology domain
func (t *Topology) getMatchingTopologies(p *v1.Pod, requirements scheduling.Requirements) []*TopologyGroup {
var matchingTopologies []*TopologyGroup
for _, tc := range t.topologies {
if tc.IsOwnedBy(p.UID) {
matchingTopologies = append(matchingTopologies, tc)
}
}
for _, tc := range t.inverseTopologies {
if tc.Counts(p, requirements) {
matchingTopologies = append(matchingTopologies, tc)
}
}
return matchingTopologies
}
func TopologyListOptions(namespace string, labelSelector *metav1.LabelSelector) *client.ListOptions {
selector := labels.Everything()
if labelSelector == nil {
return &client.ListOptions{Namespace: namespace, LabelSelector: selector}
}
for key, value := range labelSelector.MatchLabels {
requirement, err := labels.NewRequirement(key, selection.Equals, []string{value})
if err != nil {
return &client.ListOptions{Namespace: namespace, LabelSelector: labels.Nothing()}
}
selector = selector.Add(*requirement)
}
for _, expression := range labelSelector.MatchExpressions {
requirement, err := labels.NewRequirement(expression.Key, mapOperator(expression.Operator), expression.Values)
if err != nil {
return &client.ListOptions{Namespace: namespace, LabelSelector: labels.Nothing()}
}
selector = selector.Add(*requirement)
}
return &client.ListOptions{Namespace: namespace, LabelSelector: selector}
}
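// exampleTopologyListOptions is an illustrative sketch (hypothetical helper, not referenced by the scheduler) showing
// how a metav1.LabelSelector with both MatchLabels and MatchExpressions is translated into controller-runtime list
// options, roughly equivalent to the selector "app=inflate,tier in (web,api)" scoped to the given namespace.
func exampleTopologyListOptions() *client.ListOptions {
	return TopologyListOptions("default", &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "inflate"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"web", "api"}},
		},
	})
}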
func mapOperator(operator metav1.LabelSelectorOperator) selection.Operator {
switch operator {
case metav1.LabelSelectorOpIn:
return selection.In
case metav1.LabelSelectorOpNotIn:
return selection.NotIn
case metav1.LabelSelectorOpExists:
return selection.Exists
case metav1.LabelSelectorOpDoesNotExist:
return selection.DoesNotExist
}
	// this shouldn't occur as we cover all valid cases of LabelSelectorOperator that the API allows. If it still
	// does occur somehow, we'll just fail later when constructing the label requirement returns an error.
return ""
}
func IgnoredForTopology(p *v1.Pod) bool {
return !pod.IsScheduled(p) || pod.IsTerminal(p) || pod.IsTerminating(p)
}
| 407 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"math"
"github.com/mitchellh/hashstructure/v2"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"github.com/aws/karpenter-core/pkg/scheduling"
)
type TopologyType byte
const (
TopologyTypeSpread TopologyType = iota
TopologyTypePodAffinity
TopologyTypePodAntiAffinity
)
func (t TopologyType) String() string {
switch t {
case TopologyTypeSpread:
return "topology spread"
case TopologyTypePodAffinity:
return "pod affinity"
case TopologyTypePodAntiAffinity:
return "pod anti-affinity"
}
return ""
}
// TopologyGroup is used to track pod counts that match a selector by the topology domain (e.g. SELECT COUNT(*) FROM pods GROUP BY(topology_key))
type TopologyGroup struct {
// Hashed Fields
Key string
Type TopologyType
maxSkew int32
minDomains *int32
namespaces utilsets.String
selector *metav1.LabelSelector
nodeFilter TopologyNodeFilter
// Index
owners map[types.UID]struct{} // Pods that have this topology as a scheduling rule
domains map[string]int32 // TODO(ellistarn) explore replacing with a minheap
}
func NewTopologyGroup(topologyType TopologyType, topologyKey string, pod *v1.Pod, namespaces utilsets.String, labelSelector *metav1.LabelSelector, maxSkew int32, minDomains *int32, domains utilsets.String) *TopologyGroup {
domainCounts := map[string]int32{}
for domain := range domains {
domainCounts[domain] = 0
}
	// a nil TopologyNodeFilter always passes, which is what we need for affinity/anti-affinity
var nodeSelector TopologyNodeFilter
if topologyType == TopologyTypeSpread {
nodeSelector = MakeTopologyNodeFilter(pod)
}
return &TopologyGroup{
Type: topologyType,
Key: topologyKey,
namespaces: namespaces,
selector: labelSelector,
nodeFilter: nodeSelector,
maxSkew: maxSkew,
domains: domainCounts,
owners: map[types.UID]struct{}{},
minDomains: minDomains,
}
}
func (t *TopologyGroup) Get(pod *v1.Pod, podDomains, nodeDomains *scheduling.Requirement) *scheduling.Requirement {
switch t.Type {
case TopologyTypeSpread:
return t.nextDomainTopologySpread(pod, podDomains, nodeDomains)
case TopologyTypePodAffinity:
return t.nextDomainAffinity(pod, podDomains, nodeDomains)
case TopologyTypePodAntiAffinity:
return t.nextDomainAntiAffinity(podDomains)
default:
panic(fmt.Sprintf("Unrecognized topology group type: %s", t.Type))
}
}
func (t *TopologyGroup) Record(domains ...string) {
for _, domain := range domains {
t.domains[domain]++
}
}
// Counts returns true if the pod would count for the topology, given that it schedules to a node with the provided
// requirements
func (t *TopologyGroup) Counts(pod *v1.Pod, requirements scheduling.Requirements) bool {
return t.selects(pod) && t.nodeFilter.MatchesRequirements(requirements)
}
// Register ensures that the topology is aware of the given domain names.
func (t *TopologyGroup) Register(domains ...string) {
for _, domain := range domains {
if _, ok := t.domains[domain]; !ok {
t.domains[domain] = 0
}
}
}
func (t *TopologyGroup) AddOwner(key types.UID) {
t.owners[key] = struct{}{}
}
func (t *TopologyGroup) RemoveOwner(key types.UID) {
delete(t.owners, key)
}
func (t *TopologyGroup) IsOwnedBy(key types.UID) bool {
_, ok := t.owners[key]
return ok
}
// Hash is used so we can track single topologies that affect multiple groups of pods. If a deployment has 100x pods
// with self anti-affinity, we track that as a single topology with 100 owners instead of 100x topologies.
func (t *TopologyGroup) Hash() uint64 {
return lo.Must(hashstructure.Hash(struct {
TopologyKey string
Type TopologyType
Namespaces utilsets.String
LabelSelector *metav1.LabelSelector
MaxSkew int32
NodeFilter TopologyNodeFilter
}{
TopologyKey: t.Key,
Type: t.Type,
Namespaces: t.namespaces,
LabelSelector: t.selector,
MaxSkew: t.maxSkew,
NodeFilter: t.nodeFilter,
}, hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true}))
}
// nextDomainTopologySpread returns a scheduling.Requirement that includes a node domain that a pod should be scheduled to.
// If there are multiple eligible domains, we return the domain with the lowest current count that still satisfies the `maxSkew` configuration (ties broken arbitrarily).
// If there are no eligible domains, we return a `DoesNotExist` requirement, implying that we could not satisfy the topologySpread requirement.
func (t *TopologyGroup) nextDomainTopologySpread(pod *v1.Pod, podDomains, nodeDomains *scheduling.Requirement) *scheduling.Requirement {
// min count is calculated across all domains
min := t.domainMinCount(podDomains)
selfSelecting := t.selects(pod)
minDomain := ""
minCount := int32(math.MaxInt32)
for domain := range t.domains {
// but we can only choose from the node domains
if nodeDomains.Has(domain) {
// comment from kube-scheduler regarding the viable choices to schedule to based on skew is:
// 'existing matching num' + 'if self-match (1 or 0)' - 'global min matching num' <= 'maxSkew'
count := t.domains[domain]
if selfSelecting {
count++
}
if count-min <= t.maxSkew && count < minCount {
minDomain = domain
minCount = count
}
}
}
if minDomain == "" {
// avoids an error message about 'zone in [""]', preferring 'zone in []'
return scheduling.NewRequirement(podDomains.Key, v1.NodeSelectorOpDoesNotExist)
}
return scheduling.NewRequirement(podDomains.Key, v1.NodeSelectorOpIn, minDomain)
}
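// Worked example (illustrative counts): with domain counts {A: 3, B: 1, C: 1}, maxSkew=1 and a self-selecting pod,
// the global min is 1. Domain A would yield 3+1-1=3 > maxSkew and is rejected, while B and C each yield 1+1-1=1 <=
// maxSkew; the eligible domain with the lowest resulting count (ties broken by map iteration order) is returned as an
// In requirement on the topology key.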
func (t *TopologyGroup) domainMinCount(domains *scheduling.Requirement) int32 {
	// hostname-based topologies always have a min pod count of zero since we can always create a new node (a new hostname domain)
if t.Key == v1.LabelHostname {
return 0
}
min := int32(math.MaxInt32)
var numPodSupportedDomains int32
// determine our current min count
for domain, count := range t.domains {
if domains.Has(domain) {
numPodSupportedDomains++
if count < min {
min = count
}
}
}
if t.minDomains != nil && numPodSupportedDomains < *t.minDomains {
min = 0
}
return min
}
func (t *TopologyGroup) nextDomainAffinity(pod *v1.Pod, podDomains *scheduling.Requirement, nodeDomains *scheduling.Requirement) *scheduling.Requirement {
options := scheduling.NewRequirement(podDomains.Key, v1.NodeSelectorOpDoesNotExist)
for domain := range t.domains {
if podDomains.Has(domain) && t.domains[domain] > 0 {
options.Insert(domain)
}
}
// If pod is self selecting and no pod has been scheduled yet, we can pick a domain at random to bootstrap scheduling
if options.Len() == 0 && t.selects(pod) {
// First try to find a domain that is within the intersection of pod/node domains. In the case of an in-flight node
// this causes us to pick the domain that the existing in-flight node is already in if possible instead of picking
// a random viable domain.
intersected := podDomains.Intersection(nodeDomains)
for domain := range t.domains {
if intersected.Has(domain) {
options.Insert(domain)
break
}
}
// and if there are no node domains, just return the first random domain that is viable
for domain := range t.domains {
if podDomains.Has(domain) {
options.Insert(domain)
break
}
}
}
return options
}
func (t *TopologyGroup) nextDomainAntiAffinity(domains *scheduling.Requirement) *scheduling.Requirement {
options := scheduling.NewRequirement(domains.Key, v1.NodeSelectorOpDoesNotExist)
for domain := range t.domains {
if domains.Has(domain) && t.domains[domain] == 0 {
options.Insert(domain)
}
}
return options
}
// selects returns true if the given pod is selected by this topology
func (t *TopologyGroup) selects(pod *v1.Pod) bool {
selector, err := metav1.LabelSelectorAsSelector(t.selector)
if err != nil {
selector = labels.Nothing()
}
return t.namespaces.Has(pod.Namespace) && selector.Matches(labels.Set(pod.Labels))
}
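// exampleZonalSpreadGroup is an illustrative sketch (hypothetical helper, not used by the scheduler) showing how a
// single topology spread constraint on a pod maps onto a TopologyGroup, mirroring what Topology.newForTopologies does
// for each constraint. The set of known domains passed in here is made up for the example.
func exampleZonalSpreadGroup(p *v1.Pod) *TopologyGroup {
	cs := p.Spec.TopologySpreadConstraints[0] // assumes the pod declares at least one constraint
	return NewTopologyGroup(
		TopologyTypeSpread,
		cs.TopologyKey,
		p,
		utilsets.NewString(p.Namespace),
		cs.LabelSelector,
		cs.MaxSkew,
		cs.MinDomains,
		utilsets.NewString("test-zone-1", "test-zone-2"),
	)
}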
| 263 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
v1 "k8s.io/api/core/v1"
"github.com/aws/karpenter-core/pkg/scheduling"
)
// TopologyNodeFilter is used to determine if a given actual node or scheduling node matches the pod's node selectors
// and required node affinity terms. This is used with topology spread constraints to determine if the node should be
// included for topology counting purposes. This is only used with topology spread constraints as affinities/anti-affinities
// always count across all nodes. A nil or zero-value TopologyNodeFilter is valid and matches all nodes.
type TopologyNodeFilter []scheduling.Requirements
func MakeTopologyNodeFilter(p *v1.Pod) TopologyNodeFilter {
nodeSelectorRequirements := scheduling.NewLabelRequirements(p.Spec.NodeSelector)
	// if we only have a node selector, that's the only requirement that must match
if p.Spec.Affinity == nil || p.Spec.Affinity.NodeAffinity == nil || p.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return TopologyNodeFilter{nodeSelectorRequirements}
}
// otherwise, we need to match the combination of label selector and any term of the required node affinities since
// those terms are OR'd together
var filter TopologyNodeFilter
for _, term := range p.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
requirements := scheduling.NewRequirements()
requirements.Add(nodeSelectorRequirements.Values()...)
requirements.Add(scheduling.NewNodeSelectorRequirements(term.MatchExpressions...).Values()...)
filter = append(filter, requirements)
}
return filter
}
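// For example (illustrative pod spec): a pod with nodeSelector {disktype: ssd} and two required node affinity terms,
// [zone In (test-zone-1)] and [zone In (test-zone-2)], produces a filter with two requirement sets,
// {disktype=ssd, zone In (test-zone-1)} OR {disktype=ssd, zone In (test-zone-2)}; a node satisfying either set
// participates in the topology for counting purposes.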
// Matches returns true if the TopologyNodeFilter doesn't prohibit the node from participating in the topology
func (t TopologyNodeFilter) Matches(node *v1.Node) bool {
return t.MatchesRequirements(scheduling.NewLabelRequirements(node.Labels))
}
// MatchesRequirements returns true if the TopologyNodeFilter doesn't prohibit a node with the requirements from
// participating in the topology. This method allows checking the requirements from a scheduling.Machine to see if the
// node we will soon create participates in this topology.
func (t TopologyNodeFilter) MatchesRequirements(requirements scheduling.Requirements) bool {
// no requirements, so it always matches
if len(t) == 0 {
return true
}
// these are an OR, so if any passes the filter passes
for _, req := range t {
if err := requirements.Compatible(req); err == nil {
return true
}
}
return false
}
| 71 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling_test
import (
"context"
"time"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
. "github.com/onsi/ginkgo/v2"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var _ = Describe("Topology", func() {
labels := map[string]string{"test": "test"}
It("should ignore unknown topology keys", func() {
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(
test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: []v1.TopologySpreadConstraint{{
TopologyKey: "unknown",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}},
),
test.UnschedulablePod(),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
ExpectNotScheduled(ctx, env.Client, pods[0])
ExpectScheduled(ctx, env.Client, pods[1])
})
It("should not spread an invalid label selector", func() {
if env.Version.Minor() >= 24 {
Skip("Invalid label selector now is denied by admission in K8s >= 1.27.x")
}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app.kubernetes.io/name": "{{ zqfmgb }}"}},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 2)...)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2))
})
Context("Zonal", func() {
It("should balance pods across zones (match labels)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 2))
})
It("should balance pods across zones (match expressions)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "test",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"test"},
},
},
},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 2))
})
It("should respect provisioner zonal constraints", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 2))
})
It("should respect provisioner zonal constraints (subset)", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
			// should spread the four pods evenly across the only valid zones in our universe (the two zones from our single provisioner)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 2))
})
It("should respect provisioner zonal constraints (existing pod)", func() {
ExpectApplied(ctx, env.Client, provisioner)
			// need enough resource requests that the first pod we create fills a node so that node can't act as an in-flight
			// node for the other pods
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
pod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr,
NodeSelector: map[string]string{
v1.LabelTopologyZone: "test-zone-3",
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr, TopologySpreadConstraints: topology}, 6)...,
)
			// we should have unschedulable pods now: the provisioner can only schedule to zone-1/zone-2, but because of the existing
			// pod in zone-3 it can put a max of two per zone before it would violate max skew
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 2, 2))
})
It("should schedule to the non-minimum domain if its all that's available", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 5,
}}
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
// force this pod onto zone-1
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}))
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1))
// force this pod onto zone-2
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}))
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1))
// now only allow scheduling pods on zone-3
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}, 10)...,
)
// max skew of 5, so test-zone-1/2 will have 1 pod each, test-zone-3 will have 6, and the rest will fail to schedule
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 6))
})
It("should only schedule to minimum domains if already violating max skew", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
createPods := func(count int) []*v1.Pod {
var pods []*v1.Pod
for i := 0; i < count; i++ {
pods = append(pods, test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}))
}
return pods
}
// Spread 9 pods
ExpectApplied(ctx, env.Client, provisioner)
pods := createPods(9)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 3, 3))
// Delete pods to create a skew
for _, pod := range pods {
node := ExpectScheduled(ctx, env.Client, pod)
if node.Labels[v1.LabelTopologyZone] != "test-zone-1" {
ExpectDeleted(ctx, env.Client, pod)
}
}
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3))
// Create 3 more pods, skew should recover
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, createPods(3)...)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 1, 2))
})
It("should not violate max-skew when unsat = do not schedule", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
// force this pod onto zone-1
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}))
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1))
// now only allow scheduling pods on zone-2 and zone-3
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "test-zone-3"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}, 10)...,
)
// max skew of 1, so test-zone-2/3 will have 2 nodes each and the rest of the pods will fail to schedule
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 2, 2))
})
It("should not violate max-skew when unsat = do not schedule (discover domains)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
// force this pod onto zone-1
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, ResourceRequirements: rr}))
// now only allow scheduling pods on zone-2 and zone-3
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "test-zone-3"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology, ResourceRequirements: rr}, 10)...,
)
			// max skew of 1, so test-zone-2/3 will have 2 nodes each and the rest of the pods will fail to schedule since
			// test-zone-1 has 1 pod in it.
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 2, 2))
})
It("should only count running/scheduled pods with matching labels scheduled to nodes with a corresponding domain", func() {
wrongNamespace := test.RandomName()
firstNode := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelTopologyZone: "test-zone-1"}}})
secondNode := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelTopologyZone: "test-zone-2"}}})
thirdNode := test.Node(test.NodeOptions{}) // missing topology domain
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner, firstNode, secondNode, thirdNode, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: wrongNamespace}})
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(firstNode))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(secondNode))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(thirdNode))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.Pod(test.PodOptions{NodeName: firstNode.Name}), // ignored, missing labels
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}}), // ignored, pending
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: thirdNode.Name}), // ignored, no domain on node
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels, Namespace: wrongNamespace}, NodeName: firstNode.Name}), // ignored, wrong namespace
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels, DeletionTimestamp: &metav1.Time{Time: time.Now().Add(10 * time.Second)}}}), // ignored, terminating
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name, Phase: v1.PodFailed}), // ignored, phase=Failed
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name, Phase: v1.PodSucceeded}), // ignored, phase=Succeeded
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name}),
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name}),
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: secondNode.Name}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}),
)
nodes := v1.NodeList{}
Expect(env.Client.List(ctx, &nodes)).To(Succeed())
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 2, 1))
})
It("should match all pods when labelSelector is not specified", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(),
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1))
})
It("should handle interdependent selectors", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
pods := test.UnschedulablePods(test.PodOptions{TopologySpreadConstraints: topology}, 5)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
pods...,
)
// This is weird, but the topology label selector is used for determining domain counts. The pod that
// owns the topology is what the spread actually applies to. In this test case, there are no pods matching
// the label selector, so the max skew is zero. This means we can pack all the pods onto the same node since
// it doesn't violate the topology spread constraint (i.e. adding new pods doesn't increase skew since the
// pods we are adding don't count toward skew). This behavior is called out at
// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ , though it's not
// recommended for users.
nodeNames := sets.NewString()
for _, p := range pods {
nodeNames.Insert(p.Spec.NodeName)
}
Expect(nodeNames).To(HaveLen(1))
})
It("should respect minDomains constraints", func() {
if env.Version.Minor() < 24 {
Skip("MinDomains TopologySpreadConstraint is only available starting in K8s >= 1.24.x")
}
var minDomains int32 = 3
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
MinDomains: &minDomains,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 3)...,
)
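// with only two eligible zones but minDomains=3, topology spread treats the global minimum as zero, so each
// zone can hold at most maxSkew (1) of these pods and the third pod cannot schedule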
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1))
})
It("satisfied minDomains constraints (equal) should allow expected pod scheduling", func() {
if env.Version.Minor() < 24 {
Skip("MinDomains TopologySpreadConstraint is only available starting in K8s >= 1.24.x")
}
var minDomains int32 = 3
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
MinDomains: &minDomains,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 11)...,
)
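// all three zones are eligible, so minDomains is satisfied and the 11 pods spread as evenly as max-skew allows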
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 4, 3))
})
It("satisfied minDomains constraints (greater than minimum) should allow expected pod scheduling", func() {
if env.Version.Minor() < 24 {
Skip("MinDomains TopologySpreadConstraint is only available starting in K8s >= 1.24.x")
}
var minDomains int32 = 2
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
MinDomains: &minDomains,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 11)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 4, 3))
})
})
Context("Hostname", func() {
It("should balance pods across nodes", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1, 1, 1))
})
It("should balance pods on the same hostname up to maxskew", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 4,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
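// with a max-skew of 4, all four pods can be packed onto a single node without violating the constraint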
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4))
})
It("balance multiple deployments with hostname topology spread", func() {
// Issue #1425
spreadPod := func(appName string) test.PodOptions {
return test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": appName,
},
},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": appName},
},
},
},
}
}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(spreadPod("app1")), test.UnschedulablePod(spreadPod("app1")),
test.UnschedulablePod(spreadPod("app2")), test.UnschedulablePod(spreadPod("app2")),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, p := range pods {
ExpectScheduled(ctx, env.Client, p)
}
nodes := v1.NodeList{}
Expect(env.Client.List(ctx, &nodes)).To(Succeed())
// this wasn't part of #1425, but ensures that we launch the minimum number of nodes
Expect(nodes.Items).To(HaveLen(2))
})
It("balance multiple deployments with hostname topology spread & varying arch", func() {
// Issue #1425
spreadPod := func(appName, arch string) test.PodOptions {
return test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"app": appName,
},
},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{arch},
},
},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": appName},
},
},
},
}
}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
test.UnschedulablePod(spreadPod("app1", v1alpha5.ArchitectureAmd64)), test.UnschedulablePod(spreadPod("app1", v1alpha5.ArchitectureAmd64)),
test.UnschedulablePod(spreadPod("app2", v1alpha5.ArchitectureArm64)), test.UnschedulablePod(spreadPod("app2", v1alpha5.ArchitectureArm64)),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, p := range pods {
ExpectScheduled(ctx, env.Client, p)
}
nodes := v1.NodeList{}
Expect(env.Client.List(ctx, &nodes)).To(Succeed())
// same test as the previous one, but now the architectures are different so we need four nodes in total
Expect(nodes.Items).To(HaveLen(4))
})
})
Context("CapacityType", func() {
It("should balance pods across capacity types", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 2))
})
It("should respect provisioner capacity type constraints", func() {
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot, v1alpha5.CapacityTypeOnDemand}}}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 4)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 2))
})
It("should not violate max-skew when unsat = do not schedule (capacity type)", func() {
// this test can pass in a flaky manner if we don't restrict our minimum-count domain selection to choices
// that are valid per the provisioner spec
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
// force this pod onto spot
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}))
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1))
// now only allow scheduling pods on on-demand
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeOnDemand}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}, 5)...,
)
// max skew of 1, so on-demand will have 2 pods and the rest of the pods will fail to schedule
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 2))
})
It("should violate max-skew when unsat = schedule anyway (capacity type)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
rr := v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.1"),
},
}
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}))
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1))
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeOnDemand}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr, TopologySpreadConstraints: topology}, 5)...,
)
// max skew of 1, on-demand will end up with 5 pods even though spot has a single pod
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 5))
})
It("should only count running/scheduled pods with matching labels scheduled to nodes with a corresponding domain", func() {
wrongNamespace := test.RandomName()
firstNode := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1alpha5.LabelCapacityType: v1alpha5.CapacityTypeSpot}}})
secondNode := test.Node(test.NodeOptions{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1alpha5.LabelCapacityType: v1alpha5.CapacityTypeOnDemand}}})
thirdNode := test.Node(test.NodeOptions{}) // missing topology capacity type
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner, firstNode, secondNode, thirdNode, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: wrongNamespace}})
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(firstNode))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(secondNode))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(thirdNode))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.Pod(test.PodOptions{NodeName: firstNode.Name}), // ignored, missing labels
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}}), // ignored, pending
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: thirdNode.Name}), // ignored, no domain on node
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels, Namespace: wrongNamespace}, NodeName: firstNode.Name}), // ignored, wrong namespace
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels, DeletionTimestamp: &metav1.Time{Time: time.Now().Add(10 * time.Second)}}}), // ignored, terminating
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name, Phase: v1.PodFailed}), // ignored, phase=Failed
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name, Phase: v1.PodSucceeded}), // ignored, phase=Succeeded
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name}),
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: firstNode.Name}),
test.Pod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, NodeName: secondNode.Name}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}),
)
nodes := v1.NodeList{}
Expect(env.Client.List(ctx, &nodes)).To(Succeed())
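// the two pending pods are placed so the capacity-type counts stay within the max-skew of 1, ending at 2 and 3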
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 3))
})
It("should match all pods when labelSelector is not specified", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePod(),
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1))
})
It("should handle interdependent selectors", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
pods := test.UnschedulablePods(test.PodOptions{TopologySpreadConstraints: topology}, 5)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
// This is weird, but the topology label selector is used for determining domain counts. The pod that
// owns the topology is what the spread actually applies to. In this test case, there are no pods matching
// the label selector, so the max skew is zero. This means we can pack all the pods onto the same node since
// it doesn't violate the topology spread constraint (i.e. adding new pods doesn't increase skew since the
// pods we are adding don't count toward skew). This behavior is called out at
// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ , though it's not
// recommended for users.
nodeNames := sets.NewString()
for _, p := range pods {
nodeNames.Insert(p.Spec.NodeName)
}
Expect(nodeNames).To(HaveLen(1))
})
It("should balance pods across capacity-types (node required affinity constrained)", func() {
ExpectApplied(ctx, env.Client, provisioner)
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
NodeRequirements: []v1.NodeSelectorRequirement{
// launch this on-demand pod in zone-1
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}},
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{"on-demand"}},
},
}, 1)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
ExpectScheduled(ctx, env.Client, pods[0])
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
// Try to run 5 pods with a node selector restricted to test-zone-2; they should all schedule on the same
// spot node. This doesn't violate the max-skew of 1 since the node selector requirement here excludes the
// existing on-demand pod from counting within this topology.
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
// restrict these pods to spot nodes in test-zone-2
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2"}},
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{"spot"}},
},
TopologySpreadConstraints: topology,
}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 5))
})
It("should balance pods across capacity-types (no constraints)", func() {
rr := v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "single-pod-instance-type"},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{"on-demand"},
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
// limit our provisioner to only creating spot nodes
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{"spot"}},
}
// since there is no node selector on these pods, the topology can see the single on-demand node that already
// exists, and that limits us to scheduling 2 more spot pods before we would violate max-skew
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr,
TopologySpreadConstraints: topology,
}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 2))
})
It("should balance pods across arch (no constraints)", func() {
rr := v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "single-pod-instance-type"},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelArchStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{"amd64"},
},
},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelArchStable,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
// limit our provisioner to only creating arm64 nodes
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{"arm64"}}}
// since there is no node selector on these pods, the topology can see the single amd64 node that already
// exists, and that limits us to scheduling 2 more arm64 pods before we would violate max-skew
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
ResourceRequirements: rr,
TopologySpreadConstraints: topology,
}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 2))
})
})
Context("Combined Hostname and Zonal Topology", func() {
It("should spread pods while respecting both constraints (hostname and zonal)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}, {
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 3,
}}
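// the zonal constraint (max-skew 1) keeps the zones balanced, while the hostname constraint (max-skew 3)
// keeps any single node from holding more than 3 of these pods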
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 2)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 3)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(2, 2, 1))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 3, 3))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 11)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(7, 7, 7))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
})
It("should balance pods across provisioner requirements", func() {
spotProv := test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{"spot"},
},
{
Key: "capacity.spread.4-1",
Operator: v1.NodeSelectorOpIn,
Values: []string{"2", "3", "4", "5"},
},
},
})
onDemandProv := test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1alpha5.LabelCapacityType,
Operator: v1.NodeSelectorOpIn,
Values: []string{"on-demand"},
},
{
Key: "capacity.spread.4-1",
Operator: v1.NodeSelectorOpIn,
Values: []string{"1"},
},
},
})
topology := []v1.TopologySpreadConstraint{{
TopologyKey: "capacity.spread.4-1",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
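// the spot provisioner exposes four values of the capacity.spread.4-1 domain ("2"-"5") while the on-demand
// provisioner exposes one ("1"), so an even spread across the five values forces a 4:1 spot to on-demand ratio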
ExpectApplied(ctx, env.Client, spotProv, onDemandProv)
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 20)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, p := range pods {
ExpectScheduled(ctx, env.Client, p)
}
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 4, 4, 4, 4))
// due to the spread across provisioners, we've forced a 4:1 spot to on-demand spread
ExpectSkew(ctx, env.Client, "default", &v1.TopologySpreadConstraint{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}).To(ConsistOf(4, 16))
})
It("should spread pods while respecting both constraints", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}, {
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2"}}}
// create a second provisioner that can't provision at all
provisionerB := test.Provisioner()
provisionerB.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-3"}}}
provisionerB.Spec.Limits = &v1alpha5.Limits{
Resources: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("0"),
},
}
ExpectApplied(ctx, env.Client, provisioner, provisionerB)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 10)...,
)
// we should only get one pod in each of test-zone-1 and test-zone-2; test-zone-3 is still counted as a domain
// but can't be provisioned since that provisioner is effectively disabled, which caps the other zones at max-skew 1
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1))
// and one pod per node
ExpectSkew(ctx, env.Client, "default", &topology[1]).To(ConsistOf(1, 1))
})
It("should spread pods while respecting both constraints", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}, {
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 3,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 2)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(1, 1))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 3)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 2))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(5, 5))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 11)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(11, 10))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
})
})
Context("Combined Zonal and Capacity Type Topology", func() {
It("should spread pods while respecting both constraints", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}, {
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 2)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 1)))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 1)))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 3)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 3)))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 2)))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 3)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 5)))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 4)))
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, 11)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).ToNot(ContainElements(BeNumerically(">", 11)))
ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 7)))
})
})
Context("Combined Hostname, Zonal, and Capacity Type Topology", func() {
It("should spread pods while respecting all constraints", func() {
// ensure we've got an instance type for every zone/capacity-type pair
cloudProvider.InstanceTypes = fake.InstanceTypesAssorted()
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}, {
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 2,
}, {
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 3,
}}
// add varying numbers of pods, checking after each scheduling pass that the required max skew
// has not been violated for any of the constraints
for i := 1; i < 15; i++ {
pods := test.UnschedulablePods(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: labels}, TopologySpreadConstraints: topology}, i)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
ExpectMaxSkew(ctx, env.Client, "default", &topology[0]).To(BeNumerically("<=", 1))
ExpectMaxSkew(ctx, env.Client, "default", &topology[1]).To(BeNumerically("<=", 2))
ExpectMaxSkew(ctx, env.Client, "default", &topology[2]).To(BeNumerically("<=", 3))
for _, pod := range pods {
ExpectScheduled(ctx, env.Client, pod)
}
}
})
})
// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#interaction-with-node-affinity-and-node-selectors
Context("Combined Zonal Topology and Machine Affinity", func() {
It("should limit spread options by nodeSelector", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
append(
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"},
}, 5),
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"},
}, 10)...,
)...,
)
// we limit the zone of each pod via a node selector, which causes the topology spread to treat that single
// zone as the only valid domain, allowing us to schedule multiple pods per domain
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(5, 10))
})
It("should limit spread options by node requirements", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1", "test-zone-2"},
},
},
}, 10)...)
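// the node requirement limits these pods to two zones, so the ten pods split evenly across them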
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(5, 5))
})
It("should limit spread options by node affinity", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeRequirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{
"test-zone-1", "test-zone-2",
}}},
}, 6)...)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 3))
// open the provisioner back up so it can see all zones again
provisioner.Spec.Requirements = []v1.NodeSelectorRequirement{
{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1", "test-zone-2", "test-zone-3"}}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeRequirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{
"test-zone-2", "test-zone-3",
}}},
}))
// it will schedule into the currently empty test-zone-3; even though max-skew is still violated, scheduling there improves (reduces) the skew
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 3, 1))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 4, 4))
})
})
// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#interaction-with-node-affinity-and-node-selectors
Context("Combined Capacity Type Topology and Machine Affinity", func() {
It("should limit spread options by nodeSelector", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
append(
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeSelector: map[string]string{v1alpha5.LabelCapacityType: v1alpha5.CapacityTypeSpot},
}, 5),
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeSelector: map[string]string{v1alpha5.LabelCapacityType: v1alpha5.CapacityTypeOnDemand},
}, 5)...,
)...,
)
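// each group of pods is pinned to a single capacity type by its node selector, so spot and on-demand each end up with 5 pods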
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(5, 5))
})
It("should limit spread options by node affinity (capacity type)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1alpha5.LabelCapacityType,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
// we need to limit these pods to spot; otherwise the scheduler knows that on-demand has 0 pods and scheduling there wouldn't violate the max-skew
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeSpot}},
},
}, 3)...)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3))
// open the rules back up so they can see all capacity types
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
NodeRequirements: []v1.NodeSelectorRequirement{
{Key: v1alpha5.LabelCapacityType, Operator: v1.NodeSelectorOpIn, Values: []string{v1alpha5.CapacityTypeOnDemand, v1alpha5.CapacityTypeSpot}},
},
}))
// it will schedule onto the currently empty on-demand capacity type; even though max-skew is still violated, scheduling there improves (reduces) the skew
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(3, 1))
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov,
test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 5)...,
)
ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(5, 4))
})
})
Context("Pod Affinity/Anti-Affinity", func() {
It("should schedule a pod with empty pod affinity and anti-affinity", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{
PodRequirements: []v1.PodAffinityTerm{},
PodAntiRequirements: []v1.PodAffinityTerm{},
})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should respect pod affinity (hostname)", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
affLabels := map[string]string{"security": "s2"}
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
// affPod2 will try to get scheduled with affPod1
affPod2 := test.UnschedulablePod(test.PodOptions{PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}}})
var pods []*v1.Pod
pods = append(pods, test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 10)...)
pods = append(pods, affPod1)
pods = append(pods, affPod2)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
n1 := ExpectScheduled(ctx, env.Client, affPod1)
n2 := ExpectScheduled(ctx, env.Client, affPod2)
// should be scheduled on the same node
Expect(n1.Name).To(Equal(n2.Name))
})
It("should respect pod affinity (arch)", func() {
affLabels := map[string]string{"security": "s2"}
tsc := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: affLabels},
MaxSkew: 1,
}}
affPod1 := test.UnschedulablePod(test.PodOptions{
TopologySpreadConstraints: tsc,
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
NodeSelector: map[string]string{
v1.LabelArchStable: "arm64",
}})
// affPod2 will try to get scheduled with affPod1
affPod2 := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
TopologySpreadConstraints: tsc,
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelArchStable,
}}})
pods := []*v1.Pod{affPod1, affPod2}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
n1 := ExpectScheduled(ctx, env.Client, affPod1)
n2 := ExpectScheduled(ctx, env.Client, affPod2)
// should be scheduled on a node with the same arch
Expect(n1.Labels[v1.LabelArchStable]).To(Equal(n2.Labels[v1.LabelArchStable]))
// but due to TSC, not on the same node
Expect(n1.Name).ToNot(Equal(n2.Name))
})
It("should respect self pod affinity (hostname)", func() {
affLabels := map[string]string{"security": "s2"}
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}},
}, 3)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := map[string]struct{}{}
for _, p := range pods {
n := ExpectScheduled(ctx, env.Client, p)
nodeNames[n.Name] = struct{}{}
}
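// hostname self-affinity forces all three pods onto the same node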
Expect(len(nodeNames)).To(Equal(1))
})
It("should respect self pod affinity for first empty topology domain only (hostname)", func() {
affLabels := map[string]string{"security": "s2"}
createPods := func() []*v1.Pod {
return test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}},
}, 10)
}
ExpectApplied(ctx, env.Client, provisioner)
pods := createPods()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := map[string]struct{}{}
unscheduledCount := 0
scheduledCount := 0
for _, p := range pods {
p = ExpectPodExists(ctx, env.Client, p.Name, p.Namespace)
if p.Spec.NodeName == "" {
unscheduledCount++
} else {
nodeNames[p.Spec.NodeName] = struct{}{}
scheduledCount++
}
}
// the node can only hold 5 pods, so we should get a single node with 5 pods and 5 unschedulable pods from that batch
Expect(len(nodeNames)).To(Equal(1))
Expect(scheduledCount).To(BeNumerically("==", 5))
Expect(unscheduledCount).To(BeNumerically("==", 5))
// and pods in a different batch should not schedule either, even if the node is not ready yet
pods = createPods()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, p := range pods {
ExpectNotScheduled(ctx, env.Client, p)
}
})
It("should respect self pod affinity for first empty topology domain only (hostname/constrained zones)", func() {
affLabels := map[string]string{"security": "s2"}
// put one pod in test-zone-1; this does affect pod affinity even though we have different node selectors.
// The node selector and required node affinity restrictions on topology counting only apply to topology spread.
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
NodeSelector: map[string]string{
v1.LabelTopologyZone: "test-zone-1",
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}},
}))
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-2", "test-zone-3"},
},
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}},
}, 10)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, p := range pods {
// none of this should schedule
ExpectNotScheduled(ctx, env.Client, p)
}
})
It("should respect self pod affinity (zone)", func() {
affLabels := map[string]string{"security": "s2"}
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}},
}, 3)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := map[string]struct{}{}
for _, p := range pods {
n := ExpectScheduled(ctx, env.Client, p)
nodeNames[n.Name] = struct{}{}
}
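// zonal self-affinity keeps all three pods in the same zone, and they fit onto a single node there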
Expect(len(nodeNames)).To(Equal(1))
})
It("should respect self pod affinity (zone w/ constraint)", func() {
affLabels := map[string]string{"security": "s2"}
// the pods need to provide their own zonal affinity, but we further limit them to test-zone-3
pods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: affLabels,
},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-3"},
},
},
}, 3)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
nodeNames := map[string]struct{}{}
for _, p := range pods {
n := ExpectScheduled(ctx, env.Client, p)
nodeNames[n.Name] = struct{}{}
Expect(n.Labels[v1.LabelTopologyZone]).To(Equal("test-zone-3"))
}
Expect(len(nodeNames)).To(Equal(1))
})
It("should allow violation of preferred pod affinity", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
affPod2 := test.UnschedulablePod(test.PodOptions{PodPreferences: []v1.WeightedPodAffinityTerm{{
Weight: 50,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"security": "s2"},
},
TopologyKey: v1.LabelHostname,
},
}}})
var pods []*v1.Pod
pods = append(pods, test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 10)...)
pods = append(pods, affPod2)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
// should be scheduled since the pod it has affinity for doesn't exist; the affinity is only a preference,
// not a hard constraint
ExpectScheduled(ctx, env.Client, affPod2)
})
It("should allow violation of preferred pod anti-affinity", func() {
affPods := test.UnschedulablePods(test.PodOptions{PodAntiPreferences: []v1.WeightedPodAffinityTerm{
{
Weight: 50,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
TopologyKey: v1.LabelTopologyZone,
},
},
}}, 10)
var pods []*v1.Pod
pods = append(pods, test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelTopologyZone,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}},
}, 3)...)
pods = append(pods, affPods...)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, aff := range affPods {
ExpectScheduled(ctx, env.Client, aff)
}
})
It("should separate nodes using simple pod anti-affinity on hostname", func() {
affLabels := map[string]string{"security": "s2"}
// pod affinity/anti-affinity are bidirectional, so run this a few times to ensure we handle it regardless
// of pod scheduling order
ExpectApplied(ctx, env.Client, provisioner)
for i := 0; i < 10; i++ {
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
// affPod2 will avoid affPod1
affPod2 := test.UnschedulablePod(test.PodOptions{PodAntiRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPod2, affPod1)
n1 := ExpectScheduled(ctx, env.Client, affPod1)
n2 := ExpectScheduled(ctx, env.Client, affPod2)
// should not be scheduled on the same node
Expect(n1.Name).ToNot(Equal(n2.Name))
}
})
It("should not violate pod anti-affinity on zone", func() {
affLabels := map[string]string{"security": "s2"}
zone1Pod := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}})
zone2Pod := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}})
zone3Pod := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"}})
affPod := test.UnschedulablePod(test.PodOptions{
PodAntiRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, zone1Pod, zone2Pod, zone3Pod, affPod)
// the three larger zone-specific pods should get scheduled first due to first-fit descending, one
// node per zone.
ExpectScheduled(ctx, env.Client, zone1Pod)
ExpectScheduled(ctx, env.Client, zone2Pod)
ExpectScheduled(ctx, env.Client, zone3Pod)
// the pod with anti-affinity can't schedule since every zone already holds a pod matching its anti-affinity selector
ExpectNotScheduled(ctx, env.Client, affPod)
})
It("should not violate pod anti-affinity on zone (other schedules first)", func() {
affLabels := map[string]string{"security": "s2"}
pod := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}})
affPod := test.UnschedulablePod(test.PodOptions{
PodAntiRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod, affPod)
// the pod we need to avoid schedules first, but we don't know where.
ExpectScheduled(ctx, env.Client, pod)
// the pod with anti-affinity can't schedule since the pod it must avoid could be in any zone
ExpectNotScheduled(ctx, env.Client, affPod)
})
It("should not violate pod anti-affinity (arch)", func() {
affLabels := map[string]string{"security": "s2"}
tsc := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: affLabels},
MaxSkew: 1,
}}
affPod1 := test.UnschedulablePod(test.PodOptions{
TopologySpreadConstraints: tsc,
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
NodeSelector: map[string]string{
v1.LabelArchStable: "arm64",
}})
// affPod2 will try to get scheduled on a node with a different arch from affPod1. Due to its smaller resource
// requests, it is scheduled last
affPod2 := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
TopologySpreadConstraints: tsc,
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
},
PodAntiRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelArchStable,
}}})
pods := []*v1.Pod{affPod1, affPod2}
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
n1 := ExpectScheduled(ctx, env.Client, affPod1)
n2 := ExpectScheduled(ctx, env.Client, affPod2)
// should not be scheduled on nodes with the same arch
Expect(n1.Labels[v1.LabelArchStable]).ToNot(Equal(n2.Labels[v1.LabelArchStable]))
})
It("should violate preferred pod anti-affinity on zone (inverse)", func() {
affLabels := map[string]string{"security": "s2"}
anti := []v1.WeightedPodAffinityTerm{
{
Weight: 10,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
},
},
}
rr := v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}
zone1Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiPreferences: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}})
zone2Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiPreferences: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}})
zone3Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiPreferences: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"}})
affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, zone1Pod, zone2Pod, zone3Pod, affPod)
// three pods with anti-affinity will schedule first due to first fit-descending
ExpectScheduled(ctx, env.Client, zone1Pod)
ExpectScheduled(ctx, env.Client, zone2Pod)
ExpectScheduled(ctx, env.Client, zone3Pod)
// the anti-affinity was a preference, so this can schedule
ExpectScheduled(ctx, env.Client, affPod)
})
It("should not violate pod anti-affinity on zone (inverse)", func() {
affLabels := map[string]string{"security": "s2"}
anti := []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}
rr := v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}
zone1Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiRequirements: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}})
zone2Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiRequirements: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}})
zone3Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiRequirements: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"}})
affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, zone1Pod, zone2Pod, zone3Pod, affPod)
// three pods with anti-affinity will schedule first due to first fit-descending
ExpectScheduled(ctx, env.Client, zone1Pod)
ExpectScheduled(ctx, env.Client, zone2Pod)
ExpectScheduled(ctx, env.Client, zone3Pod)
// this pod has no anti-affinity rules of its own, but it can't schedule because every zone already has a
// pod whose anti-affinity rules exclude it
ExpectNotScheduled(ctx, env.Client, affPod)
})
It("should not violate pod anti-affinity on zone (Schrödinger)", func() {
affLabels := map[string]string{"security": "s2"}
anti := []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}
zoneAnywherePod := test.UnschedulablePod(test.PodOptions{
PodAntiRequirements: anti,
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
},
})
affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, zoneAnywherePod, affPod)
// the pod with anti-affinity will schedule first due to first fit-descending, but we don't know which zone it landed in
node1 := ExpectScheduled(ctx, env.Client, zoneAnywherePod)
// this pod cannot schedule since the pod with anti-affinity could potentially be in any zone
affPod = ExpectNotScheduled(ctx, env.Client, affPod)
// a second batching will now allow the pod to schedule as the zoneAnywherePod has been committed to a zone
// by the actual node creation
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPod)
node2 := ExpectScheduled(ctx, env.Client, affPod)
Expect(node1.Labels[v1.LabelTopologyZone]).ToNot(Equal(node2.Labels[v1.LabelTopologyZone]))
})
It("should not violate pod anti-affinity on zone (inverse w/existing nodes)", func() {
affLabels := map[string]string{"security": "s2"}
anti := []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}
rr := v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}
zone1Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiRequirements: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}})
zone2Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiRequirements: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}})
zone3Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiRequirements: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"}})
affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
// provision these so we get three nodes that exist in the cluster with anti-affinity to a pod that we will
// then try to schedule
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, zone1Pod, zone2Pod, zone3Pod)
node1 := ExpectScheduled(ctx, env.Client, zone1Pod)
node2 := ExpectScheduled(ctx, env.Client, zone2Pod)
node3 := ExpectScheduled(ctx, env.Client, zone3Pod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node2))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node3))
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone1Pod))
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone2Pod))
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone3Pod))
// this pod has no anti-affinity rules of its own, but it can't schedule since every zone already has an
// existing pod (not from this batch) whose anti-affinity rules prevent it from scheduling there
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPod)
ExpectNotScheduled(ctx, env.Client, affPod)
})
It("should violate preferred pod anti-affinity on zone (inverse w/existing nodes)", func() {
affLabels := map[string]string{"security": "s2"}
anti := []v1.WeightedPodAffinityTerm{
{
Weight: 10,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
},
},
}
rr := v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
}
zone1Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiPreferences: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1"}})
zone2Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiPreferences: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-2"}})
zone3Pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: rr,
PodAntiPreferences: anti,
NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-3"}})
affPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
// provision these so we get three nodes that exist in the cluster with anti-affinity to a pod that we will
// then try to schedule
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, zone1Pod, zone2Pod, zone3Pod)
node1 := ExpectScheduled(ctx, env.Client, zone1Pod)
node2 := ExpectScheduled(ctx, env.Client, zone2Pod)
node3 := ExpectScheduled(ctx, env.Client, zone3Pod)
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node1))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node2))
ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node3))
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone1Pod))
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone2Pod))
ExpectReconcileSucceeded(ctx, podStateController, client.ObjectKeyFromObject(zone3Pod))
// this pod with no anti-affinity rules can schedule, though it couldn't if the anti-affinity were required
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPod)
ExpectScheduled(ctx, env.Client, affPod)
})
It("should allow violation of a pod affinity preference with a conflicting required constraint", func() {
affLabels := map[string]string{"security": "s2"}
constraint := v1.TopologySpreadConstraint{
MaxSkew: 1,
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchLabels: labels,
},
}
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
affPods := test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
// limit these pods to one per host
TopologySpreadConstraints: []v1.TopologySpreadConstraint{constraint},
// with a preference to the other pod
PodPreferences: []v1.WeightedPodAffinityTerm{{
Weight: 50,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
},
}}}, 3)
ExpectApplied(ctx, env.Client, provisioner)
pods := append(affPods, affPod1)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
// all pods should be scheduled since the affinity term is just a preference
for _, pod := range pods {
ExpectScheduled(ctx, env.Client, pod)
}
// and we'll get three nodes due to the topology spread
ExpectSkew(ctx, env.Client, "", &constraint).To(ConsistOf(1, 1, 1))
})
It("should support pod anti-affinity with a zone topology", func() {
affLabels := map[string]string{"security": "s2"}
// affPods will avoid being scheduled in the same zone
createPods := func() []*v1.Pod {
return test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
PodAntiRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}}, 3)
}
top := &v1.TopologySpreadConstraint{TopologyKey: v1.LabelTopologyZone}
// One of the downsides of late committal is that, absent other constraints, it takes multiple batches of
// scheduling for zonal anti-affinities to work themselves out. On the first scheduling pass we know that the pod
// will land in test-zone-1, test-zone-2, or test-zone-3, but we don't know which zone it collapses to until the
// node is actually created.
// only one pod will schedule in the first batch
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, createPods()...)
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1))
// delete all of the unscheduled ones as provisioning will only bind pods passed into the provisioning call
// the scheduler looks at all pods though, so it may assume a pod from this batch schedules and no others do
ExpectDeleteAllUnscheduledPods(ctx, env.Client)
// second pod in a second zone
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, createPods()...)
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1, 1))
ExpectDeleteAllUnscheduledPods(ctx, env.Client)
// third pod in the last zone
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, createPods()...)
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1, 1, 1))
ExpectDeleteAllUnscheduledPods(ctx, env.Client)
// and nothing else can schedule
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, createPods()...)
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1, 1, 1))
ExpectDeleteAllUnscheduledPods(ctx, env.Client)
})
It("should not schedule pods with affinity to a non-existent pod", func() {
affLabels := map[string]string{"security": "s2"}
affPods := test.UnschedulablePods(test.PodOptions{
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}}, 10)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPods...)
// the pod we have affinity to is not in the cluster, so all of these pods are unschedulable
for _, p := range affPods {
ExpectNotScheduled(ctx, env.Client, p)
}
})
It("should support pod affinity with zone topology (unconstrained target)", func() {
affLabels := map[string]string{"security": "s2"}
// the pod that the others have an affinity to
targetPod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels}})
// affPods all want to schedule in the same zone as targetPod, but can't since its zone is undetermined
affPods := test.UnschedulablePods(test.PodOptions{
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}}, 10)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, append(affPods, targetPod)...)
top := &v1.TopologySpreadConstraint{TopologyKey: v1.LabelTopologyZone}
// these pods can't schedule as the pod they have affinity to isn't limited to any particular zone
for i := range affPods {
ExpectNotScheduled(ctx, env.Client, affPods[i])
affPods[i] = ExpectPodExists(ctx, env.Client, affPods[i].Name, affPods[i].Namespace)
}
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(1))
// now that targetPod has been scheduled to a node, its zone is committed and the pods with affinity to it
// should schedule in the same zone
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPods...)
for _, pod := range affPods {
ExpectScheduled(ctx, env.Client, pod)
}
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(11))
})
It("should support pod affinity with zone topology (constrained target)", func() {
affLabels := map[string]string{"security": "s2"}
// the pod that the others have an affinity to
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels},
NodeRequirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
}})
// affPods will all be scheduled in the same zone as affPod1
affPods := test.UnschedulablePods(test.PodOptions{
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelTopologyZone,
}}}, 10)
affPods = append(affPods, affPod1)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, affPods...)
top := &v1.TopologySpreadConstraint{TopologyKey: v1.LabelTopologyZone}
ExpectSkew(ctx, env.Client, "default", top).To(ConsistOf(11))
})
It("should handle multiple dependent affinities", func() {
dbLabels := map[string]string{"type": "db", "spread": "spread"}
webLabels := map[string]string{"type": "web", "spread": "spread"}
cacheLabels := map[string]string{"type": "cache", "spread": "spread"}
uiLabels := map[string]string{"type": "ui", "spread": "spread"}
for i := 0; i < 50; i++ {
ExpectApplied(ctx, env.Client, provisioner.DeepCopy())
// we have to schedule DB -> Web -> Cache -> UI in that order or else there are pod affinity violations
pods := []*v1.Pod{
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: dbLabels}}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: webLabels},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{MatchLabels: dbLabels},
TopologyKey: v1.LabelHostname},
}}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: cacheLabels},
PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{MatchLabels: webLabels},
TopologyKey: v1.LabelHostname},
}}),
test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: uiLabels},
PodRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{MatchLabels: cacheLabels},
TopologyKey: v1.LabelHostname},
}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for i := range pods {
ExpectScheduled(ctx, env.Client, pods[i])
}
ExpectCleanedUp(ctx, env.Client)
cluster.Reset()
}
})
It("should fail to schedule pods with unsatisfiable dependencies", func() {
dbLabels := map[string]string{"type": "db", "spread": "spread"}
webLabels := map[string]string{"type": "web", "spread": "spread"}
ExpectApplied(ctx, env.Client, provisioner)
// this pod wants to schedule with a non-existent pod; this test just ensures that the scheduling loop
// doesn't loop forever
pod := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: dbLabels},
PodRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{MatchLabels: webLabels},
TopologyKey: v1.LabelHostname,
},
}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectNotScheduled(ctx, env.Client, pod)
})
It("should filter pod affinity topologies by namespace, no matching pods", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "other-ns-no-match"}})
affLabels := map[string]string{"security": "s2"}
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels, Namespace: "other-ns-no-match"}})
// affPod2 will try to get scheduled with affPod1
affPod2 := test.UnschedulablePod(test.PodOptions{PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
TopologyKey: v1.LabelHostname,
}}})
var pods []*v1.Pod
// creates 10 nodes due to topo spread
pods = append(pods, test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 10)...)
pods = append(pods, affPod1)
pods = append(pods, affPod2)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
// the target pod gets scheduled
ExpectScheduled(ctx, env.Client, affPod1)
// but the one with affinity does not since the target pod is not in the same namespace and doesn't
// match the namespace list or namespace selector
ExpectNotScheduled(ctx, env.Client, affPod2)
})
It("should filter pod affinity topologies by namespace, matching pods namespace list", func() {
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "other-ns-list"}})
affLabels := map[string]string{"security": "s2"}
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels, Namespace: "other-ns-list"}})
// affPod2 will try to get scheduled with affPod1
affPod2 := test.UnschedulablePod(test.PodOptions{PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
Namespaces: []string{"other-ns-list"},
TopologyKey: v1.LabelHostname,
}}})
var pods []*v1.Pod
// create 10 nodes
pods = append(pods, test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 10)...)
// put our target pod on one of them
pods = append(pods, affPod1)
// and our pod with affinity should schedule on the same node
pods = append(pods, affPod2)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
n1 := ExpectScheduled(ctx, env.Client, affPod1)
n2 := ExpectScheduled(ctx, env.Client, affPod2)
// should be scheduled on the same node
Expect(n1.Name).To(Equal(n2.Name))
})
It("should filter pod affinity topologies by namespace, empty namespace selector", func() {
if env.Version.Minor() < 21 {
Skip("namespace selector is only supported on K8s >= 1.21.x")
}
topology := []v1.TopologySpreadConstraint{{
TopologyKey: v1.LabelHostname,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
MaxSkew: 1,
}}
ExpectApplied(ctx, env.Client, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "empty-ns-selector", Labels: map[string]string{"foo": "bar"}}})
affLabels := map[string]string{"security": "s2"}
affPod1 := test.UnschedulablePod(test.PodOptions{ObjectMeta: metav1.ObjectMeta{Labels: affLabels, Namespace: "empty-ns-selector"}})
// affPod2 will try to get scheduled with affPod1
affPod2 := test.UnschedulablePod(test.PodOptions{PodRequirements: []v1.PodAffinityTerm{{
LabelSelector: &metav1.LabelSelector{
MatchLabels: affLabels,
},
// select all pods in all namespaces since the selector is empty
NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{}},
TopologyKey: v1.LabelHostname,
}}})
var pods []*v1.Pod
// create 10 nodes
pods = append(pods, test.UnschedulablePods(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: topology,
}, 10)...)
// put our target pod on one of them
pods = append(pods, affPod1)
// and our pod with affinity should schedule on the same node
pods = append(pods, affPod2)
ExpectApplied(ctx, env.Client, provisioner)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
n1 := ExpectScheduled(ctx, env.Client, affPod1)
n2 := ExpectScheduled(ctx, env.Client, affPod2)
// should be scheduled on the same node due to the empty namespace selector
Expect(n1.Name).To(Equal(n2.Name))
})
It("should count topology across multiple provisioners", func() {
ExpectApplied(ctx, env.Client,
test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1"}}},
}),
test.Provisioner(test.ProvisionerOptions{
Requirements: []v1.NodeSelectorRequirement{{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-2", "test-zone-3"}}},
}),
)
labels := map[string]string{"foo": "bar"}
topology := v1.TopologySpreadConstraint{
TopologyKey: v1.LabelTopologyZone,
MaxSkew: 1,
LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
WhenUnsatisfiable: v1.DoNotSchedule,
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, test.Pods(10, test.UnscheduleablePodOptions(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{topology},
}))...)
ExpectSkew(ctx, env.Client, "default", &topology).To(ConsistOf(3, 3, 4))
})
})
})
func ExpectDeleteAllUnscheduledPods(ctx2 context.Context, c client.Client) {
var pods v1.PodList
Expect(c.List(ctx2, &pods)).To(Succeed())
for i := range pods.Items {
if pods.Items[i].Spec.NodeName == "" {
ExpectDeleted(ctx2, c, &pods.Items[i])
}
}
}
var _ = Describe("Taints", func() {
It("should taint nodes with provisioner taints", func() {
provisioner.Spec.Taints = []v1.Taint{{Key: "test", Value: "bar", Effect: v1.TaintEffectNoSchedule}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(
test.PodOptions{Tolerations: []v1.Toleration{{Effect: v1.TaintEffectNoSchedule, Operator: v1.TolerationOpExists}}},
)
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Spec.Taints).To(ContainElement(provisioner.Spec.Taints[0]))
})
It("should schedule pods that tolerate provisioner constraints", func() {
provisioner.Spec.Taints = []v1.Taint{{Key: "test-key", Value: "test-value", Effect: v1.TaintEffectNoSchedule}}
ExpectApplied(ctx, env.Client, provisioner)
pods := []*v1.Pod{
// Tolerates with OpExists
test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}}}),
// Tolerates with OpEqual
test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Value: "test-value", Operator: v1.TolerationOpEqual, Effect: v1.TaintEffectNoSchedule}}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...)
for _, pod := range pods {
ExpectScheduled(ctx, env.Client, pod)
}
ExpectApplied(ctx, env.Client, provisioner)
otherPods := []*v1.Pod{
// Missing toleration
test.UnschedulablePod(),
// key mismatch with OpExists
test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "invalid", Operator: v1.TolerationOpExists}}}),
// value mismatch
test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpEqual, Effect: v1.TaintEffectNoSchedule}}}),
}
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, otherPods...)
for _, pod := range otherPods {
ExpectNotScheduled(ctx, env.Client, pod)
}
})
It("should provision nodes with taints and schedule pods if the taint is only a startup taint", func() {
provisioner.Spec.StartupTaints = []v1.Taint{{Key: "ignore-me", Value: "nothing-to-see-here", Effect: v1.TaintEffectNoSchedule}}
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod()
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
ExpectScheduled(ctx, env.Client, pod)
})
It("should not generate taints for OpExists", func() {
ExpectApplied(ctx, env.Client, provisioner)
pod := test.UnschedulablePod(test.PodOptions{Tolerations: []v1.Toleration{{Key: "test-key", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoExecute}}})
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
node := ExpectScheduled(ctx, env.Client, pod)
Expect(node.Spec.Taints).To(HaveLen(1)) // Expect no taints generated beyond the default
})
})
| 2,337 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func NewVolumeTopology(kubeClient client.Client) *VolumeTopology {
return &VolumeTopology{kubeClient: kubeClient}
}
type VolumeTopology struct {
kubeClient client.Client
}
func (v *VolumeTopology) Inject(ctx context.Context, pod *v1.Pod) error {
var requirements []v1.NodeSelectorRequirement
for _, volume := range pod.Spec.Volumes {
req, err := v.getRequirements(ctx, pod, volume)
if err != nil {
return err
}
requirements = append(requirements, req...)
}
if len(requirements) == 0 {
return nil
}
if pod.Spec.Affinity == nil {
pod.Spec.Affinity = &v1.Affinity{}
}
if pod.Spec.Affinity.NodeAffinity == nil {
pod.Spec.Affinity.NodeAffinity = &v1.NodeAffinity{}
}
if pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
}
if len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 {
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []v1.NodeSelectorTerm{{}}
}
// We add our volume topology zonal requirement to every node selector term. This causes it to be AND'd with every existing
// requirement so that relaxation won't remove our volume requirement.
for i := 0; i < len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms); i++ {
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i].MatchExpressions = append(
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i].MatchExpressions, requirements...)
}
return nil
}
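// exampleInjectUsage is a hypothetical sketch (not part of the original source) showing how a caller might run
// Inject before simulating scheduling; kubeClient is assumed to be a controller-runtime client and pod is assumed
// to reference a PVC whose PV or StorageClass is zonally constrained.
func exampleInjectUsage(ctx context.Context, kubeClient client.Client, pod *v1.Pod) error {
	vt := NewVolumeTopology(kubeClient)
	if err := vt.Inject(ctx, pod); err != nil {
		return fmt.Errorf("injecting volume topology requirements, %w", err)
	}
	// After injection every node selector term on the pod carries the derived zonal requirement, so preference
	// relaxation elsewhere in scheduling cannot drop the volume constraint.
	return nil
}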
func (v *VolumeTopology) getRequirements(ctx context.Context, pod *v1.Pod, volume v1.Volume) ([]v1.NodeSelectorRequirement, error) {
// Get PVC
if volume.PersistentVolumeClaim == nil {
return nil, nil
}
pvc, err := v.getPersistentVolumeClaim(ctx, pod, volume)
if err != nil {
return nil, err
}
// Persistent Volume Requirements
if pvc.Spec.VolumeName != "" {
requirements, err := v.getPersistentVolumeRequirements(ctx, pod, pvc)
if err != nil {
return nil, fmt.Errorf("getting existing requirements, %w", err)
}
return requirements, nil
}
// Storage Class Requirements
if ptr.StringValue(pvc.Spec.StorageClassName) != "" {
requirements, err := v.getStorageClassRequirements(ctx, pvc)
if err != nil {
return nil, err
}
return requirements, nil
}
return nil, nil
}
func (v *VolumeTopology) getStorageClassRequirements(ctx context.Context, pvc *v1.PersistentVolumeClaim) ([]v1.NodeSelectorRequirement, error) {
storageClass := &storagev1.StorageClass{}
if err := v.kubeClient.Get(ctx, types.NamespacedName{Name: ptr.StringValue(pvc.Spec.StorageClassName)}, storageClass); err != nil {
return nil, fmt.Errorf("getting storage class %q, %w", ptr.StringValue(pvc.Spec.StorageClassName), err)
}
var requirements []v1.NodeSelectorRequirement
if len(storageClass.AllowedTopologies) > 0 {
// Terms are ORed, only use the first term
for _, requirement := range storageClass.AllowedTopologies[0].MatchLabelExpressions {
requirements = append(requirements, v1.NodeSelectorRequirement{Key: requirement.Key, Operator: v1.NodeSelectorOpIn, Values: requirement.Values})
}
}
return requirements, nil
}
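// exampleZonalStorageClass is a hypothetical sketch (not part of the original source) of a StorageClass whose first
// allowed-topology term would be translated by getStorageClassRequirements into a single In requirement on the
// zone label; the driver name is illustrative only.
func exampleZonalStorageClass() *storagev1.StorageClass {
	return &storagev1.StorageClass{
		Provisioner: "example.csi.driver", // hypothetical CSI driver name
		AllowedTopologies: []v1.TopologySelectorTerm{{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
				Key:    v1.LabelTopologyZone,
				Values: []string{"test-zone-1"},
			}},
		}},
	}
}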
func (v *VolumeTopology) getPersistentVolumeRequirements(ctx context.Context, pod *v1.Pod, pvc *v1.PersistentVolumeClaim) ([]v1.NodeSelectorRequirement, error) {
pv := &v1.PersistentVolume{}
if err := v.kubeClient.Get(ctx, types.NamespacedName{Name: pvc.Spec.VolumeName, Namespace: pod.Namespace}, pv); err != nil {
return nil, fmt.Errorf("getting persistent volume %q, %w", pvc.Spec.VolumeName, err)
}
if pv.Spec.NodeAffinity == nil {
return nil, nil
}
if pv.Spec.NodeAffinity.Required == nil {
return nil, nil
}
var requirements []v1.NodeSelectorRequirement
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) > 0 {
// Terms are ORed, only use the first term
requirements = append(requirements, pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions...)
}
return requirements, nil
}
func (v *VolumeTopology) getPersistentVolumeClaim(ctx context.Context, pod *v1.Pod, volume v1.Volume) (*v1.PersistentVolumeClaim, error) {
if volume.PersistentVolumeClaim == nil {
return nil, nil
}
pvc := &v1.PersistentVolumeClaim{}
if err := v.kubeClient.Get(ctx, types.NamespacedName{Name: volume.PersistentVolumeClaim.ClaimName, Namespace: pod.Namespace}, pvc); err != nil {
return nil, fmt.Errorf("getting persistent volume claim %q, %w", volume.PersistentVolumeClaim.ClaimName, err)
}
return pvc, nil
}
// ValidatePersistentVolumeClaims returns an error if the pod doesn't appear to be valid with respect to
// PVCs (e.g. the PVC is not found or references an unknown storage class).
func (v *VolumeTopology) ValidatePersistentVolumeClaims(ctx context.Context, pod *v1.Pod) error {
for _, volume := range pod.Spec.Volumes {
var storageClassName *string
var volumeName string
if volume.PersistentVolumeClaim != nil {
// validate the PVC if it exists
pvc, err := v.getPersistentVolumeClaim(ctx, pod, volume)
if err != nil {
return err
}
// may not have a PVC
if pvc == nil {
continue
}
storageClassName = pvc.Spec.StorageClassName
volumeName = pvc.Spec.VolumeName
} else if volume.Ephemeral != nil {
storageClassName = volume.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
}
if err := v.validateStorageClass(ctx, storageClassName); err != nil {
return err
}
if err := v.validateVolume(ctx, volumeName); err != nil {
return err
}
}
return nil
}
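// exampleValidateBeforeScheduling is a hypothetical sketch (not part of the original source): callers are expected
// to reject pods whose volume claims reference missing PVCs, PVs, or StorageClasses before provisioning capacity
// for them.
func exampleValidateBeforeScheduling(ctx context.Context, vt *VolumeTopology, pod *v1.Pod) bool {
	if err := vt.ValidatePersistentVolumeClaims(ctx, pod); err != nil {
		// Provisioning a node now would be wasted work since the pod still could not run until the missing
		// storage objects exist.
		return false
	}
	return true
}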
func (v *VolumeTopology) validateVolume(ctx context.Context, volumeName string) error {
// if we have a volume name, ensure that it exists
if volumeName != "" {
pv := &v1.PersistentVolume{}
if err := v.kubeClient.Get(ctx, types.NamespacedName{Name: volumeName}, pv); err != nil {
return err
}
}
return nil
}
func (v *VolumeTopology) validateStorageClass(ctx context.Context, storageClassName *string) error {
// if we have a storage class name, ensure that it exists
if ptr.StringValue(storageClassName) != "" {
storageClass := &storagev1.StorageClass{}
if err := v.kubeClient.Get(ctx, types.NamespacedName{Name: ptr.StringValue(storageClassName)}, storageClass); err != nil {
return err
}
}
return nil
}
| 197 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"fmt"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/flowcontrol"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/events"
)
// PodNominationRateLimiter is a pointer so it rate-limits across events
var PodNominationRateLimiter = flowcontrol.NewTokenBucketRateLimiter(5, 10)
func NominatePod(pod *v1.Pod, node *v1.Node, machine *v1alpha5.Machine) events.Event {
var info []string
if machine != nil {
info = append(info, fmt.Sprintf("machine/%s", machine.Name))
}
if node != nil {
info = append(info, fmt.Sprintf("node/%s", node.Name))
}
return events.Event{
InvolvedObject: pod,
Type: v1.EventTypeNormal,
Reason: "Nominated",
Message: fmt.Sprintf("Pod should schedule on: %s", strings.Join(info, ", ")),
DedupeValues: []string{string(pod.UID)},
RateLimiter: PodNominationRateLimiter,
}
}
func PodFailedToSchedule(pod *v1.Pod, err error) events.Event {
return events.Event{
InvolvedObject: pod,
Type: v1.EventTypeWarning,
Reason: "FailedScheduling",
Message: fmt.Sprintf("Failed to schedule pod, %s", err),
DedupeValues: []string{string(pod.UID)},
DedupeTimeout: 5 * time.Minute,
}
}
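// exampleRecordEvents is a hypothetical sketch (not part of the original source) of how these constructors are
// meant to be consumed; recorder is assumed to be an events.Recorder exposing a Publish method.
func exampleRecordEvents(recorder events.Recorder, pod *v1.Pod, node *v1.Node, schedulingErr error) {
	if schedulingErr != nil {
		// Deduped on pod UID for 5 minutes so repeated failures don't spam the event stream.
		recorder.Publish(PodFailedToSchedule(pod, schedulingErr))
		return
	}
	// The shared token bucket (5 events/s, burst of 10) rate-limits nominations across all pods.
	recorder.Publish(NominatePod(pod, node, nil))
}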
| 60 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"context"
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/samber/lo"
"go.uber.org/multierr"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/scheduling"
podutils "github.com/aws/karpenter-core/pkg/utils/pod"
"github.com/aws/karpenter-core/pkg/utils/sets"
)
// Cluster maintains cluster state that is often needed but expensive to compute.
type Cluster struct {
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
clock clock.Clock
mu sync.RWMutex
nodes map[string]*StateNode // provider id -> cached node
bindings map[types.NamespacedName]string // pod namespaced name -> node name
nameToProviderID map[string]string // node name -> provider id
daemonSetPods sync.Map // daemonSet -> existing pod
antiAffinityPods sync.Map // pod namespaced name -> *v1.Pod of pods that have required anti affinities
// consolidatedAt is a timestamp marking the last time that we calculated consolidation off of the current cluster state
// This value is overridden with the 0 timestamp if we haven't calculated consolidation off of the current cluster state
consolidatedAt atomic.Int64
}
func NewCluster(clk clock.Clock, client client.Client, cp cloudprovider.CloudProvider) *Cluster {
return &Cluster{
clock: clk,
kubeClient: client,
cloudProvider: cp,
nodes: map[string]*StateNode{},
bindings: map[types.NamespacedName]string{},
daemonSetPods: sync.Map{},
nameToProviderID: map[string]string{},
}
}
// Synced validates that the Machines and the Nodes that are stored in the apiserver
// have the same representation in the cluster state. This is to ensure that our view
// of the cluster is as close to correct as it can be when we begin to perform operations
// utilizing the cluster state as our source of truth
func (c *Cluster) Synced(ctx context.Context) bool {
machineList := &v1alpha5.MachineList{}
if err := c.kubeClient.List(ctx, machineList); err != nil {
logging.FromContext(ctx).Errorf("checking cluster state sync, %v", err)
return false
}
nodeList := &v1.NodeList{}
if err := c.kubeClient.List(ctx, nodeList); err != nil {
logging.FromContext(ctx).Errorf("checking cluster state sync, %v", err)
return false
}
c.mu.RLock()
stateNames := sets.New(lo.Keys(c.nameToProviderID)...)
c.mu.RUnlock()
names := sets.New[string]()
for _, machine := range machineList.Items {
// If the machine hasn't resolved its provider id, then it hasn't resolved its status
if machine.Status.ProviderID == "" {
return false
}
names.Insert(machine.Name)
}
for _, node := range nodeList.Items {
names.Insert(node.Name)
}
// The provider ids tracked in-memory should at least have all the data that is in the api-server
// This doesn't ensure that the two states are exactly aligned (we could still not be tracking a node
// that exists in the cluster state but not in the apiserver) but it ensures that we have a state
// representation for every node/machine that exists on the apiserver
return stateNames.IsSuperset(names)
}
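// exampleRequireSync is a hypothetical sketch (not part of the original source): controllers that treat the cluster
// state as their source of truth are expected to skip work until Synced reports true rather than act on a stale
// view of machines and nodes.
func exampleRequireSync(ctx context.Context, cluster *Cluster) bool {
	if !cluster.Synced(ctx) {
		// The in-memory state is missing machines or nodes that exist in the apiserver; retry later.
		return false
	}
	return true
}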
// ForPodsWithAntiAffinity calls the supplied function once for each pod with required anti affinity terms that is
// currently bound to a node. The pod returned may not be up-to-date with respect to status, however since the
// anti-affinity terms can't be modified, they will be correct.
func (c *Cluster) ForPodsWithAntiAffinity(fn func(p *v1.Pod, n *v1.Node) bool) {
c.antiAffinityPods.Range(func(key, value interface{}) bool {
pod := value.(*v1.Pod)
c.mu.RLock()
defer c.mu.RUnlock()
nodeName, ok := c.bindings[client.ObjectKeyFromObject(pod)]
if !ok {
return true
}
node, ok := c.nodes[c.nameToProviderID[nodeName]]
if !ok || node.Node == nil {
// if we receive the node deletion event before the pod deletion event, this can happen
return true
}
return fn(pod, node.Node)
})
}
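// exampleCountAntiAffinityPods is a hypothetical sketch (not part of the original source) showing the callback
// contract: return true to keep iterating, false to stop early.
func exampleCountAntiAffinityPods(c *Cluster) int {
	count := 0
	c.ForPodsWithAntiAffinity(func(p *v1.Pod, n *v1.Node) bool {
		count++
		return true // visit every bound pod that has required anti-affinity terms
	})
	return count
}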
// ForEachNode calls the supplied function once per node object that is being tracked. It is not safe to store the
// state.StateNode object; it should only be accessed from within the function provided to this method.
func (c *Cluster) ForEachNode(f func(n *StateNode) bool) {
c.mu.RLock()
defer c.mu.RUnlock()
for _, node := range c.nodes {
if !f(node) {
return
}
}
}
// Nodes creates a DeepCopy of all state nodes.
// NOTE: This is very inefficient so this should only be used when DeepCopying is absolutely necessary
func (c *Cluster) Nodes() StateNodes {
c.mu.RLock()
defer c.mu.RUnlock()
return lo.Map(lo.Values(c.nodes), func(n *StateNode, _ int) *StateNode {
return n.DeepCopy()
})
}
// IsNodeNominated returns true if the given node was expected to have a pod bound to it during a recent scheduling
// batch
func (c *Cluster) IsNodeNominated(name string) bool {
c.mu.RLock()
defer c.mu.RUnlock()
if id, ok := c.nameToProviderID[name]; ok {
return c.nodes[id].Nominated()
}
return false
}
// NominateNodeForPod records that a node was the target of a pending pod during a scheduling batch
func (c *Cluster) NominateNodeForPod(ctx context.Context, name string) {
c.mu.Lock()
defer c.mu.Unlock()
if id, ok := c.nameToProviderID[name]; ok {
c.nodes[id].Nominate(ctx) // extends nomination window if already nominated
}
}
// UnmarkForDeletion removes the marking on the node as a node the controller intends to delete
func (c *Cluster) UnmarkForDeletion(names ...string) {
c.mu.Lock()
defer c.mu.Unlock()
for _, name := range names {
if id, ok := c.nameToProviderID[name]; ok {
c.nodes[id].markedForDeletion = false
}
}
}
// MarkForDeletion marks the node as pending deletion in the internal cluster state
func (c *Cluster) MarkForDeletion(names ...string) {
c.mu.Lock()
defer c.mu.Unlock()
for _, name := range names {
if id, ok := c.nameToProviderID[name]; ok {
c.nodes[id].markedForDeletion = true
}
}
}
func (c *Cluster) UpdateMachine(machine *v1alpha5.Machine) {
c.mu.Lock()
defer c.mu.Unlock()
if machine.Status.ProviderID == "" {
return // We can't reconcile machines that don't yet have provider ids
}
n := c.newStateFromMachine(machine, c.nodes[machine.Status.ProviderID])
c.nodes[machine.Status.ProviderID] = n
c.nameToProviderID[machine.Name] = machine.Status.ProviderID
}
func (c *Cluster) DeleteMachine(name string) {
c.mu.Lock()
defer c.mu.Unlock()
c.cleanupMachine(name)
}
func (c *Cluster) UpdateNode(ctx context.Context, node *v1.Node) error {
c.mu.Lock()
defer c.mu.Unlock()
if node.Spec.ProviderID == "" {
node.Spec.ProviderID = node.Name
}
n, err := c.newStateFromNode(ctx, node, c.nodes[node.Spec.ProviderID])
if err != nil {
return err
}
c.nodes[node.Spec.ProviderID] = n
c.nameToProviderID[node.Name] = node.Spec.ProviderID
return nil
}
func (c *Cluster) DeleteNode(name string) {
c.mu.Lock()
defer c.mu.Unlock()
c.cleanupNode(name)
}
func (c *Cluster) UpdatePod(ctx context.Context, pod *v1.Pod) error {
c.mu.Lock()
defer c.mu.Unlock()
var err error
if podutils.IsTerminal(pod) {
c.updateNodeUsageFromPodCompletion(client.ObjectKeyFromObject(pod))
} else {
err = c.updateNodeUsageFromPod(ctx, pod)
}
c.updatePodAntiAffinities(pod)
return err
}
func (c *Cluster) DeletePod(podKey types.NamespacedName) {
c.mu.Lock()
defer c.mu.Unlock()
c.antiAffinityPods.Delete(podKey)
c.updateNodeUsageFromPodCompletion(podKey)
c.SetConsolidated(false)
}
// SetConsolidated updates based on the following conditions:
// 1. consolidated is TRUE: Updates the state to record that we have viewed this cluster state at this time
// 2. consolidated is FALSE: Resets the value to mark that we haven't currently viewed this state or the state is in flux
func (c *Cluster) SetConsolidated(consolidated bool) {
if consolidated {
c.consolidatedAt.Store(c.clock.Now().UnixMilli())
} else {
c.consolidatedAt.Store(0)
}
}
// Consolidated returns whether the current cluster state has been observed for consolidation. If
// consolidation can't occur and the state hasn't changed, there is no point in re-attempting consolidation. This
// allows reducing overall CPU utilization by pausing consolidation when the cluster is in a static state.
func (c *Cluster) Consolidated() bool {
// The state counts as consolidated only if it was fully observed within the last 5 minutes and has not changed
// since then (any change resets consolidatedAt to 0). Capping the window at 5 minutes ensures that we re-attempt
// consolidation at least that often, in case something we can't detect has changed (e.g. instance type
// availability) that would now allow consolidation to occur.
return c.clock.Since(time.UnixMilli(c.consolidatedAt.Load())) < time.Minute*5
}
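// exampleConsolidationGate is a hypothetical sketch (not part of the original source) of the intended interplay:
// skip a pass while the state is unchanged and recently observed, and record an observation after a full pass
// finds nothing more to do.
func exampleConsolidationGate(c *Cluster) bool {
	if c.Consolidated() {
		return false // nothing has changed since the last fully observed pass
	}
	// ... run the consolidation simulation against c.Nodes() here ...
	c.SetConsolidated(true) // mark the current state as fully observed
	return true
}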
// Reset the cluster state for unit testing
func (c *Cluster) Reset() {
c.mu.Lock()
defer c.mu.Unlock()
c.nodes = map[string]*StateNode{}
c.nameToProviderID = map[string]string{}
c.bindings = map[types.NamespacedName]string{}
c.antiAffinityPods = sync.Map{}
c.daemonSetPods = sync.Map{}
}
func (c *Cluster) GetDaemonSetPod(daemonset *appsv1.DaemonSet) *v1.Pod {
if pod, ok := c.daemonSetPods.Load(client.ObjectKeyFromObject(daemonset)); ok {
return pod.(*v1.Pod).DeepCopy()
}
return nil
}
func (c *Cluster) UpdateDaemonSet(ctx context.Context, daemonset *appsv1.DaemonSet) error {
pods := &v1.PodList{}
err := c.kubeClient.List(ctx, pods, client.InNamespace(daemonset.Namespace))
if err != nil {
return err
}
sort.Slice(pods.Items, func(i, j int) bool {
return pods.Items[i].CreationTimestamp.Unix() > pods.Items[j].CreationTimestamp.Unix()
})
for i := range pods.Items {
if metav1.IsControlledBy(&pods.Items[i], daemonset) {
c.daemonSetPods.Store(client.ObjectKeyFromObject(daemonset), &pods.Items[i])
break
}
}
return nil
}
func (c *Cluster) DeleteDaemonSet(key types.NamespacedName) {
c.daemonSetPods.Delete(key)
}
// WARNING
// Everything under this section of code assumes that you already hold the cluster state lock when calling into
// these functions to explicitly modify the cluster state. If you do not hold the lock before calling any of these
// helpers, you will hit race conditions and data corruption.
func (c *Cluster) newStateFromMachine(machine *v1alpha5.Machine, oldNode *StateNode) *StateNode {
if oldNode == nil {
oldNode = NewNode()
}
n := &StateNode{
Node: oldNode.Node,
Machine: machine,
hostPortUsage: oldNode.hostPortUsage,
volumeUsage: oldNode.volumeUsage,
daemonSetRequests: oldNode.daemonSetRequests,
daemonSetLimits: oldNode.daemonSetLimits,
podRequests: oldNode.podRequests,
podLimits: oldNode.podLimits,
markedForDeletion: oldNode.markedForDeletion,
nominatedUntil: oldNode.nominatedUntil,
}
// Cleanup the old machine with its old providerID if its providerID changes
// This can happen since nodes don't get created with providerIDs. Rather, CCM picks up the
// created node and injects the providerID into the spec.providerID
if id, ok := c.nameToProviderID[machine.Name]; ok && id != machine.Status.ProviderID {
c.cleanupMachine(machine.Name)
}
c.triggerConsolidationOnChange(oldNode, n)
return n
}
func (c *Cluster) cleanupMachine(name string) {
if id := c.nameToProviderID[name]; id != "" {
if c.nodes[id].Node == nil {
delete(c.nodes, id)
} else {
c.nodes[id].Machine = nil
}
delete(c.nameToProviderID, name)
c.SetConsolidated(false)
}
}
func (c *Cluster) newStateFromNode(ctx context.Context, node *v1.Node, oldNode *StateNode) (*StateNode, error) {
if oldNode == nil {
oldNode = NewNode()
}
n := &StateNode{
Node: node,
Machine: oldNode.Machine,
inflightAllocatable: oldNode.inflightAllocatable,
inflightCapacity: oldNode.inflightCapacity,
startupTaints: oldNode.startupTaints,
daemonSetRequests: map[types.NamespacedName]v1.ResourceList{},
daemonSetLimits: map[types.NamespacedName]v1.ResourceList{},
podRequests: map[types.NamespacedName]v1.ResourceList{},
podLimits: map[types.NamespacedName]v1.ResourceList{},
hostPortUsage: scheduling.NewHostPortUsage(),
volumeUsage: scheduling.NewVolumeUsage(),
volumeLimits: scheduling.VolumeCount{},
markedForDeletion: oldNode.markedForDeletion,
nominatedUntil: oldNode.nominatedUntil,
}
if err := multierr.Combine(
c.populateStartupTaints(ctx, n),
c.populateInflight(ctx, n),
c.populateResourceRequests(ctx, n),
c.populateVolumeLimits(ctx, n),
); err != nil {
return nil, err
}
// Cleanup the old node with its old providerID if its providerID changes
// This can happen since nodes don't get created with providerIDs. Rather, CCM picks up the
// created node and injects the providerID into the spec.providerID
if id, ok := c.nameToProviderID[node.Name]; ok && id != node.Spec.ProviderID {
c.cleanupNode(node.Name)
}
c.triggerConsolidationOnChange(oldNode, n)
return n, nil
}
func (c *Cluster) cleanupNode(name string) {
if id := c.nameToProviderID[name]; id != "" {
if c.nodes[id].Machine == nil {
delete(c.nodes, id)
} else {
c.nodes[id].Node = nil
}
delete(c.nameToProviderID, name)
c.SetConsolidated(false)
}
}
func (c *Cluster) populateStartupTaints(ctx context.Context, n *StateNode) error {
if !n.Owned() {
return nil
}
provisioner := &v1alpha5.Provisioner{}
if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Labels()[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil {
return client.IgnoreNotFound(fmt.Errorf("getting provisioner, %w", err))
}
n.startupTaints = provisioner.Spec.StartupTaints
return nil
}
func (c *Cluster) populateInflight(ctx context.Context, n *StateNode) error {
if !n.Owned() {
return nil
}
provisioner := &v1alpha5.Provisioner{}
if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Labels()[v1alpha5.ProvisionerNameLabelKey]}, provisioner); err != nil {
return client.IgnoreNotFound(fmt.Errorf("getting provisioner, %w", err))
}
instanceTypes, err := c.cloudProvider.GetInstanceTypes(ctx, provisioner)
if err != nil {
return err
}
instanceType, ok := lo.Find(instanceTypes, func(it *cloudprovider.InstanceType) bool {
return it.Name == n.Labels()[v1.LabelInstanceTypeStable]
})
if !ok {
return fmt.Errorf("instance type '%s' not found", n.Labels()[v1.LabelInstanceTypeStable])
}
n.inflightCapacity = instanceType.Capacity
n.inflightAllocatable = instanceType.Allocatable()
return nil
}
func (c *Cluster) populateVolumeLimits(ctx context.Context, n *StateNode) error {
var csiNode storagev1.CSINode
if err := c.kubeClient.Get(ctx, client.ObjectKey{Name: n.Node.Name}, &csiNode); err != nil {
return client.IgnoreNotFound(fmt.Errorf("getting CSINode to determine volume limit for %s, %w", n.Node.Name, err))
}
for _, driver := range csiNode.Spec.Drivers {
if driver.Allocatable == nil {
continue
}
n.volumeLimits[driver.Name] = int(ptr.Int32Value(driver.Allocatable.Count))
}
return nil
}
func (c *Cluster) populateResourceRequests(ctx context.Context, n *StateNode) error {
var pods v1.PodList
if err := c.kubeClient.List(ctx, &pods, client.MatchingFields{"spec.nodeName": n.Node.Name}); err != nil {
return fmt.Errorf("listing pods, %w", err)
}
for i := range pods.Items {
pod := &pods.Items[i]
if podutils.IsTerminal(pod) {
continue
}
c.cleanupOldBindings(pod)
n.updateForPod(ctx, c.kubeClient, pod)
c.bindings[client.ObjectKeyFromObject(pod)] = pod.Spec.NodeName
}
return nil
}
// updateNodeUsageFromPod is called every time a reconcile event occurs for the pod. If the pod's binding has changed
// (unbound to bound), we need to update the resource requests on the node.
func (c *Cluster) updateNodeUsageFromPod(ctx context.Context, pod *v1.Pod) error {
// nothing to do if the pod isn't bound, checking early allows avoiding unnecessary locking
if pod.Spec.NodeName == "" {
return nil
}
n, ok := c.nodes[c.nameToProviderID[pod.Spec.NodeName]]
if !ok {
// the node must exist for us to update the resource requests on the node
return errors.NewNotFound(schema.GroupResource{Resource: "Machine"}, pod.Spec.NodeName)
}
c.cleanupOldBindings(pod)
n.updateForPod(ctx, c.kubeClient, pod)
c.bindings[client.ObjectKeyFromObject(pod)] = pod.Spec.NodeName
return nil
}
func (c *Cluster) updateNodeUsageFromPodCompletion(podKey types.NamespacedName) {
nodeName, bindingKnown := c.bindings[podKey]
if !bindingKnown {
// we didn't think the pod was bound, so we weren't tracking it and don't need to do anything
return
}
delete(c.bindings, podKey)
n, ok := c.nodes[c.nameToProviderID[nodeName]]
if !ok {
// we weren't tracking the node yet, so nothing to do
return
}
n.cleanupForPod(podKey)
}
func (c *Cluster) cleanupOldBindings(pod *v1.Pod) {
if oldNodeName, bindingKnown := c.bindings[client.ObjectKeyFromObject(pod)]; bindingKnown {
if oldNodeName == pod.Spec.NodeName {
// we are already tracking the pod binding, so nothing to update
return
}
// the pod has switched nodes, this can occur if a pod name was re-used, and it was deleted/re-created rapidly,
// binding to a different node the second time
if oldNode, ok := c.nodes[c.nameToProviderID[oldNodeName]]; ok {
// we were tracking the old node, so we need to reduce its capacity by the amount of the pod that left
oldNode.cleanupForPod(client.ObjectKeyFromObject(pod))
delete(c.bindings, client.ObjectKeyFromObject(pod))
}
}
// new pod binding has occurred
c.SetConsolidated(false)
}
func (c *Cluster) updatePodAntiAffinities(pod *v1.Pod) {
// We intentionally don't track inverse anti-affinity preferences. We're not
// required to enforce them so it just adds complexity for very little
// value. The problem with them comes from the relaxation process, the pod
// we are relaxing is not the pod with the anti-affinity term.
if podKey := client.ObjectKeyFromObject(pod); podutils.HasRequiredPodAntiAffinity(pod) {
c.antiAffinityPods.Store(podKey, pod)
} else {
c.antiAffinityPods.Delete(podKey)
}
}
func (c *Cluster) triggerConsolidationOnChange(old, new *StateNode) {
if old == nil || new == nil {
c.SetConsolidated(false)
return
}
if old.Initialized() != new.Initialized() {
c.SetConsolidated(false)
return
}
if old.MarkedForDeletion() != new.MarkedForDeletion() {
c.SetConsolidated(false)
return
}
}
| 575 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state
import (
"context"
"time"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/scheduling"
nodeutils "github.com/aws/karpenter-core/pkg/utils/node"
podutils "github.com/aws/karpenter-core/pkg/utils/pod"
"github.com/aws/karpenter-core/pkg/utils/resources"
)
// StateNodes is a typed version of a list of *Node
// nolint: revive
type StateNodes []*StateNode
// Active filters StateNodes that are not in a MarkedForDeletion state
func (n StateNodes) Active() StateNodes {
return lo.Filter(n, func(node *StateNode, _ int) bool {
return !node.MarkedForDeletion()
})
}
// Deleting filters StateNodes that are in a MarkedForDeletion state
func (n StateNodes) Deleting() StateNodes {
return lo.Filter(n, func(node *StateNode, _ int) bool {
return node.MarkedForDeletion()
})
}
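// exampleSplitNodes is a hypothetical sketch (not part of the original source) showing the typical split: schedule
// new pods only against Active() nodes while treating Deleting() nodes as capacity that is going away.
func exampleSplitNodes(nodes StateNodes) (schedulable StateNodes, draining StateNodes) {
	return nodes.Active(), nodes.Deleting()
}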
// Pods gets the pods assigned to all StateNodes based on the kubernetes api-server bindings
func (n StateNodes) Pods(ctx context.Context, c client.Client) ([]*v1.Pod, error) {
var pods []*v1.Pod
for _, node := range n {
p, err := node.Pods(ctx, c)
if err != nil {
return nil, err
}
pods = append(pods, p...)
}
return pods, nil
}
// StateNode is a cached version of a node in the cluster that maintains state which is expensive to compute every time it's
// needed. This currently contains node utilization across all the allocatable resources, but will soon be used to
// compute topology information.
// +k8s:deepcopy-gen=true
// nolint: revive
type StateNode struct {
Node *v1.Node
Machine *v1alpha5.Machine
inflightAllocatable v1.ResourceList // TODO @joinnis: This can be removed when machine is added
inflightCapacity v1.ResourceList // TODO @joinnis: This can be removed when machine is added
startupTaints []v1.Taint // TODO: @joinnis: This can be removed when machine is added
// daemonSetRequests is the total amount of resources that have been requested by daemon sets. This allows users
// of the Node to identify the remaining resources that we expect future daemonsets to consume.
daemonSetRequests map[types.NamespacedName]v1.ResourceList
daemonSetLimits map[types.NamespacedName]v1.ResourceList
podRequests map[types.NamespacedName]v1.ResourceList
podLimits map[types.NamespacedName]v1.ResourceList
hostPortUsage *scheduling.HostPortUsage
volumeUsage *scheduling.VolumeUsage
volumeLimits scheduling.VolumeCount
markedForDeletion bool
nominatedUntil metav1.Time
}
func NewNode() *StateNode {
return &StateNode{
inflightAllocatable: v1.ResourceList{},
inflightCapacity: v1.ResourceList{},
startupTaints: []v1.Taint{},
daemonSetRequests: map[types.NamespacedName]v1.ResourceList{},
daemonSetLimits: map[types.NamespacedName]v1.ResourceList{},
podRequests: map[types.NamespacedName]v1.ResourceList{},
podLimits: map[types.NamespacedName]v1.ResourceList{},
hostPortUsage: &scheduling.HostPortUsage{},
volumeUsage: scheduling.NewVolumeUsage(),
volumeLimits: scheduling.VolumeCount{},
}
}
func (in *StateNode) Name() string {
if in.Node == nil {
return in.Machine.Name
}
if in.Machine == nil {
return in.Node.Name
}
if !in.Machine.StatusConditions().GetCondition(v1alpha5.MachineRegistered).IsTrue() {
return in.Machine.Name
}
return in.Node.Name
}
// Pods gets the pods assigned to the Node based on the kubernetes api-server bindings
func (in *StateNode) Pods(ctx context.Context, c client.Client) ([]*v1.Pod, error) {
if in.Node == nil {
return nil, nil
}
return nodeutils.GetNodePods(ctx, c, in.Node)
}
func (in *StateNode) HostName() string {
if in.Labels()[v1.LabelHostname] == "" {
return in.Name()
}
return in.Labels()[v1.LabelHostname]
}
func (in *StateNode) Annotations() map[string]string {
// If the machine exists and the state node isn't initialized
// use the machine representation of the annotations
if in.Node == nil {
return in.Machine.Annotations
}
if in.Machine == nil {
return in.Node.Annotations
}
if !in.Machine.StatusConditions().GetCondition(v1alpha5.MachineRegistered).IsTrue() {
return in.Machine.Annotations
}
return in.Node.Annotations
}
func (in *StateNode) Labels() map[string]string {
// If the machine exists and the state node isn't initialized
// use the machine representation of the labels
if in.Node == nil {
return in.Machine.Labels
}
if in.Machine == nil {
return in.Node.Labels
}
if !in.Machine.StatusConditions().GetCondition(v1alpha5.MachineRegistered).IsTrue() {
return in.Machine.Labels
}
return in.Node.Labels
}
func (in *StateNode) Taints() []v1.Taint {
// Only consider startup taints until the node is initialized. Without this, if the startup taint is generic and
// re-appears on the node for a different reason (e.g. the node is cordoned) we will assume that pods can
// schedule against the node in the future incorrectly.
ephemeralTaints := scheduling.KnownEphemeralTaints
if !in.Initialized() && in.Owned() {
if in.Machine != nil {
ephemeralTaints = append(ephemeralTaints, in.Machine.Spec.StartupTaints...)
} else {
ephemeralTaints = append(ephemeralTaints, in.startupTaints...)
}
}
var taints []v1.Taint
if !in.Initialized() && in.Machine != nil {
taints = in.Machine.Spec.Taints
} else {
taints = in.Node.Spec.Taints
}
return lo.Reject(taints, func(taint v1.Taint, _ int) bool {
_, found := lo.Find(ephemeralTaints, func(t v1.Taint) bool {
return t.MatchTaint(&taint)
})
return found
})
}
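// exampleToleratesNode is a hypothetical sketch (not part of the original source): because Taints() strips known
// ephemeral taints and, on uninitialized Karpenter-owned nodes, the configured startup taints, a toleration check
// against its result treats a node that is still starting up as schedulable for future pods. scheduling.Taints is
// assumed to expose a Tolerates helper as in this repository's scheduling package.
func exampleToleratesNode(in *StateNode, pod *v1.Pod) bool {
	return scheduling.Taints(in.Taints()).Tolerates(pod) == nil
}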
func (in *StateNode) Initialized() bool {
if in.Machine != nil {
if in.Node != nil && in.Machine.StatusConditions().GetCondition(v1alpha5.MachineInitialized).IsTrue() {
return true
}
return false
}
if in.Node != nil {
return in.Node.Labels[v1alpha5.LabelNodeInitialized] == "true"
}
return false
}
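// Capacity returns the node's resource capacity. Until the node is initialized, zero-valued entries reported by
// the Node are backfilled from the Machine's status capacity (or from the inflight capacity for Karpenter-owned
// nodes without a Machine).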
func (in *StateNode) Capacity() v1.ResourceList {
if !in.Initialized() && in.Machine != nil {
// Override any zero quantity values in the node status
if in.Node != nil {
ret := lo.Assign(in.Node.Status.Capacity)
for resourceName, quantity := range in.Machine.Status.Capacity {
if resources.IsZero(ret[resourceName]) {
ret[resourceName] = quantity
}
}
return ret
}
return in.Machine.Status.Capacity
}
// TODO @joinnis: Remove this when machine migration is complete
if !in.Initialized() && in.Owned() {
// Override any zero quantity values in the node status
ret := lo.Assign(in.Node.Status.Capacity)
for resourceName, quantity := range in.inflightCapacity {
if resources.IsZero(ret[resourceName]) {
ret[resourceName] = quantity
}
}
return ret
}
return in.Node.Status.Capacity
}
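// Allocatable returns the node's allocatable resources. Until the node is initialized, zero-valued entries
// reported by the Node are backfilled from the Machine's status allocatable (or from the inflight allocatable
// for Karpenter-owned nodes without a Machine).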
func (in *StateNode) Allocatable() v1.ResourceList {
if !in.Initialized() && in.Machine != nil {
// Override any zero quantity values in the node status
if in.Node != nil {
ret := lo.Assign(in.Node.Status.Allocatable)
for resourceName, quantity := range in.Machine.Status.Allocatable {
if resources.IsZero(ret[resourceName]) {
ret[resourceName] = quantity
}
}
return ret
}
return in.Machine.Status.Allocatable
}
// TODO @joinnis: Remove this when machine migration is complete
if !in.Initialized() && in.Owned() {
// Override any zero quantity values in the node status
ret := lo.Assign(in.Node.Status.Allocatable)
for resourceName, quantity := range in.inflightAllocatable {
if resources.IsZero(ret[resourceName]) {
ret[resourceName] = quantity
}
}
return ret
}
return in.Node.Status.Allocatable
}
// Available is allocatable minus anything allocated to pods.
func (in *StateNode) Available() v1.ResourceList {
return resources.Subtract(in.Allocatable(), in.PodRequests())
}
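// DaemonSetRequests returns the combined resource requests of the daemonset pods tracked on the node.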
func (in *StateNode) DaemonSetRequests() v1.ResourceList {
return resources.Merge(lo.Values(in.daemonSetRequests)...)
}
func (in *StateNode) DaemonSetLimits() v1.ResourceList {
return resources.Merge(lo.Values(in.daemonSetLimits)...)
}
func (in *StateNode) HostPortUsage() *scheduling.HostPortUsage {
return in.hostPortUsage
}
func (in *StateNode) VolumeUsage() *scheduling.VolumeUsage {
return in.volumeUsage
}
func (in *StateNode) VolumeLimits() scheduling.VolumeCount {
return in.volumeLimits
}
func (in *StateNode) PodRequests() v1.ResourceList {
return resources.Merge(lo.Values(in.podRequests)...)
}
func (in *StateNode) PodLimits() v1.ResourceList {
return resources.Merge(lo.Values(in.podLimits)...)
}
func (in *StateNode) MarkedForDeletion() bool {
// The Node is marked for deletion if:
// 1. The Node has been explicitly marked for deletion
// 2. The Node has a Machine counterpart that is actively deleting
// 3. The Node has no Machine counterpart and the Node itself is actively deleting
return in.markedForDeletion ||
(in.Machine != nil && !in.Machine.DeletionTimestamp.IsZero()) ||
(in.Node != nil && in.Machine == nil && !in.Node.DeletionTimestamp.IsZero())
}
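// Nominate records that the node has been selected for pending pods, resetting the nomination window from now.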
func (in *StateNode) Nominate(ctx context.Context) {
in.nominatedUntil = metav1.Time{Time: time.Now().Add(nominationWindow(ctx))}
}
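// Nominated reports whether the node's nomination window is still active.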
func (in *StateNode) Nominated() bool {
return in.nominatedUntil.After(time.Now())
}
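// Owned reports whether the node is managed by Karpenter, i.e. it carries the provisioner name label.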
func (in *StateNode) Owned() bool {
return in.Labels()[v1alpha5.ProvisionerNameLabelKey] != ""
}
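// updateForPod tracks the pod's resource requests and limits, host port usage, and volume usage against the
// node; daemonset pods are additionally tracked separately.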
func (in *StateNode) updateForPod(ctx context.Context, kubeClient client.Client, pod *v1.Pod) {
podKey := client.ObjectKeyFromObject(pod)
in.podRequests[podKey] = resources.RequestsForPods(pod)
in.podLimits[podKey] = resources.LimitsForPods(pod)
// if it's a daemonset, we track what it has requested separately
if podutils.IsOwnedByDaemonSet(pod) {
in.daemonSetRequests[podKey] = resources.RequestsForPods(pod)
in.daemonSetLimits[podKey] = resources.LimitsForPods(pod)
}
in.hostPortUsage.Add(ctx, pod)
in.volumeUsage.Add(ctx, kubeClient, pod)
}
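// cleanupForPod removes all per-pod state previously recorded by updateForPod.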
func (in *StateNode) cleanupForPod(podKey types.NamespacedName) {
in.hostPortUsage.DeletePod(podKey)
in.volumeUsage.DeletePod(podKey)
delete(in.podRequests, podKey)
delete(in.podLimits, podKey)
delete(in.daemonSetRequests, podKey)
delete(in.daemonSetLimits, podKey)
}
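// nominationWindow is twice the configured batch max duration, with a minimum of 10 seconds.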
func nominationWindow(ctx context.Context) time.Duration {
nominationPeriod := 2 * settings.FromContext(ctx).BatchMaxDuration.Duration
if nominationPeriod < 10*time.Second {
nominationPeriod = 10 * time.Second
}
return nominationPeriod
}
| 349 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package state_test
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
cloudproviderapi "k8s.io/cloud-provider/api"
clock "k8s.io/utils/clock/testing"
"knative.dev/pkg/ptr"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/state/informer"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/controllers/state"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var env *test.Environment
var fakeClock *clock.FakeClock
var cluster *state.Cluster
var machineController controller.Controller
var nodeController controller.Controller
var podController controller.Controller
var provisionerController controller.Controller
var daemonsetController controller.Controller
var cloudProvider *fake.CloudProvider
var provisioner *v1alpha5.Provisioner
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controllers/State")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = BeforeEach(func() {
ctx = settings.ToContext(ctx, test.Settings())
cloudProvider = fake.NewCloudProvider()
cloudProvider.InstanceTypes = fake.InstanceTypesAssorted()
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
machineController = informer.NewMachineController(env.Client, cluster)
nodeController = informer.NewNodeController(env.Client, cluster)
podController = informer.NewPodController(env.Client, cluster)
provisionerController = informer.NewProvisionerController(env.Client, cluster)
daemonsetController = informer.NewDaemonSetController(env.Client, cluster)
provisioner = test.Provisioner(test.ProvisionerOptions{ObjectMeta: metav1.ObjectMeta{Name: "default"}})
ExpectApplied(ctx, env.Client, provisioner)
})
var _ = AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
})
var _ = Describe("Inflight Nodes", func() {
It("should consider the node capacity/allocatable based on the instance type", func() {
instanceType := cloudProvider.InstanceTypes[0]
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: instanceType.Name,
}},
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
ExpectResources(instanceType.Allocatable(), ExpectStateNodeExists(node).Allocatable())
ExpectResources(instanceType.Capacity, ExpectStateNodeExists(node).Capacity())
})
It("should consider the node capacity/allocatable as a combination of instance type and current node", func() {
instanceType := cloudProvider.InstanceTypes[0]
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: instanceType.Name,
}},
Allocatable: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Capacity: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
ExpectResources(v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"), // pulled from the node's real allocatable
v1.ResourceCPU: *instanceType.Capacity.Cpu(),
v1.ResourceEphemeralStorage: *instanceType.Capacity.StorageEphemeral(),
}, ExpectStateNodeExists(node).Allocatable())
ExpectResources(v1.ResourceList{
v1.ResourceMemory: *instanceType.Capacity.Memory(),
v1.ResourceCPU: *instanceType.Capacity.Cpu(),
v1.ResourceEphemeralStorage: resource.MustParse("100Gi"), // pulled from the node's real capacity
}, ExpectStateNodeExists(node).Capacity())
})
It("should ignore machines that don't yet have provider id", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Taints: []v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
},
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
})
machine.Status.ProviderID = ""
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeNotFoundForMachine(machine)
})
It("should model the inflight data as machine with no node", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1alpha5.DoNotConsolidateNodeAnnotationKey: "true",
},
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: "default",
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
v1.LabelTopologyZone: "test-zone-1",
v1.LabelTopologyRegion: "test-region",
v1.LabelHostname: "custom-host-name",
},
},
Spec: v1alpha5.MachineSpec{
Taints: []v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
},
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
Resources: v1alpha5.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
stateNode := ExpectStateNodeExistsForMachine(machine)
Expect(stateNode.Labels()).To(HaveKeyWithValue(v1alpha5.ProvisionerNameLabelKey, "default"))
Expect(stateNode.Labels()).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, cloudProvider.InstanceTypes[0].Name))
Expect(stateNode.Labels()).To(HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1"))
Expect(stateNode.Labels()).To(HaveKeyWithValue(v1.LabelTopologyRegion, "test-region"))
Expect(stateNode.Labels()).To(HaveKeyWithValue(v1.LabelHostname, "custom-host-name"))
Expect(stateNode.HostName()).To(Equal("custom-host-name"))
Expect(stateNode.Annotations()).To(HaveKeyWithValue(v1alpha5.DoNotConsolidateNodeAnnotationKey, "true"))
Expect(stateNode.Initialized()).To(BeFalse())
Expect(stateNode.Owned()).To(BeTrue())
})
It("should model the inflight capacity/allocatable as the machine capacity/allocatable", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
}, ExpectStateNodeExistsForMachine(machine).Capacity())
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
}, ExpectStateNodeExistsForMachine(machine).Allocatable())
})
It("should model the inflight capacity of the machine until the node registers and is initialized", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
}, ExpectStateNodeExistsForMachine(machine).Capacity())
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
}, ExpectStateNodeExistsForMachine(machine).Allocatable())
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("30500Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("900m"),
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("17800Mi"),
},
})
ExpectApplied(ctx, env.Client, node)
ExpectStateNodeCount("==", 1)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
// Update machine to be initialized
machine.StatusConditions().MarkTrue(v1alpha5.MachineInitialized)
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
ExpectStateNodeExistsForMachine(machine)
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("30500Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
}, ExpectStateNodeExists(node).Capacity())
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("900m"),
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("17800Mi"),
}, ExpectStateNodeExists(node).Allocatable())
})
It("should not return startup taints when the node isn't initialized", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: "custom-taint2",
Value: "custom-value2",
Effect: v1.TaintEffectNoExecute,
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
stateNode := ExpectStateNodeExistsForMachine(machine)
Expect(stateNode.Taints()).To(HaveLen(0))
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("30500Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("900m"),
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("17800Mi"),
},
Taints: []v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: "custom-taint2",
Value: "custom-value2",
Effect: v1.TaintEffectNoExecute,
},
},
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
stateNode = ExpectStateNodeExists(node)
Expect(stateNode.Taints()).To(HaveLen(0))
})
It("should return startup taints when the node is initialized", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
StartupTaints: []v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: "custom-taint2",
Value: "custom-value2",
Effect: v1.TaintEffectNoExecute,
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("30500Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("900m"),
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("17800Mi"),
},
Taints: []v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: "custom-taint2",
Value: "custom-value2",
Effect: v1.TaintEffectNoExecute,
},
},
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectMakeMachinesInitialized(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
stateNode := ExpectStateNodeExists(node)
Expect(stateNode.Taints()).To(HaveLen(2))
Expect(stateNode.Taints()).To(Equal([]v1.Taint{
{
Key: "custom-taint",
Value: "custom-value",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: "custom-taint2",
Value: "custom-value2",
Effect: v1.TaintEffectNoExecute,
},
}))
})
It("should not return known ephemeral taints", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("30500Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("900m"),
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("17800Mi"),
},
Taints: []v1.Taint{
{
Key: v1.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: v1.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
},
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Effect: v1.TaintEffectNoSchedule,
Value: "true",
},
},
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
stateNode := ExpectStateNodeExists(node)
Expect(stateNode.Taints()).To(HaveLen(0))
})
It("should combine the inflight capacity with node while node isn't initialized", func() {
machine := test.Machine(v1alpha5.Machine{
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("0"), // Should use the inflight capacity for this value
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"), // Should use the inflight allocatable for this value
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("0"), // Should use the inflight allocatable for this value
},
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
// Machine isn't initialized yet, so the resources should remain the inflight resources
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1800m"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("19000Mi"),
}, ExpectStateNodeExistsForMachine(machine).Capacity())
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("29250Mi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
}, ExpectStateNodeExistsForMachine(machine).Allocatable())
})
It("should continue node nomination when an inflight node becomes a real node", func() {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
cluster.NominateNodeForPod(ctx, machine.Name)
Expect(ExpectStateNodeExistsForMachine(machine).Nominated()).To(BeTrue())
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
})
node.Spec.ProviderID = machine.Status.ProviderID
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
Expect(ExpectStateNodeExists(node).Nominated()).To(BeTrue())
})
It("should continue MarkedForDeletion when an inflight node becomes a real node", func() {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectStateNodeCount("==", 1)
cluster.MarkForDeletion(machine.Name)
Expect(ExpectStateNodeExistsForMachine(machine).MarkedForDeletion()).To(BeTrue())
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
})
node.Spec.ProviderID = machine.Status.ProviderID
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
Expect(ExpectStateNodeExists(node).MarkedForDeletion()).To(BeTrue())
})
})
var _ = Describe("Node Resource Level", func() {
It("should not count pods not bound to nodes", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
pod2 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2"),
}},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod1, pod2)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
// two pods, but neither is bound to the node so the node's CPU requests should be zero
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.0")}, ExpectStateNodeExists(node).PodRequests())
})
It("should count new pods bound to nodes", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
pod2 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2"),
}},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod1, pod2)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectManualBinding(ctx, env.Client, pod1, node)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("1.5")}, ExpectStateNodeExists(node).PodRequests())
ExpectManualBinding(ctx, env.Client, pod2, node)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("3.5")}, ExpectStateNodeExists(node).PodRequests())
})
It("should count existing pods bound to nodes", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
pod2 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2"),
}},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
// simulate a node that already exists in our cluster
ExpectApplied(ctx, env.Client, pod1, pod2)
ExpectApplied(ctx, env.Client, node)
ExpectManualBinding(ctx, env.Client, pod1, node)
ExpectManualBinding(ctx, env.Client, pod2, node)
// and that Karpenter is only now noticing
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("3.5")}, ExpectStateNodeExists(node).PodRequests())
})
It("should subtract requests if the pod is deleted", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
pod2 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2"),
}},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod1, pod2)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectManualBinding(ctx, env.Client, pod1, node)
ExpectManualBinding(ctx, env.Client, pod2, node)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("3.5")}, ExpectStateNodeExists(node).PodRequests())
// delete the pods and the CPU usage should go down
ExpectDeleted(ctx, env.Client, pod2)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("1.5")}, ExpectStateNodeExists(node).PodRequests())
ExpectDeleted(ctx, env.Client, pod1)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("0")}, ExpectStateNodeExists(node).PodRequests())
})
It("should not add requests if the pod is terminal", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
Phase: v1.PodFailed,
})
pod2 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2"),
}},
Phase: v1.PodSucceeded,
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
},
})
ExpectApplied(ctx, env.Client, pod1, pod2)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectManualBinding(ctx, env.Client, pod1, node)
ExpectManualBinding(ctx, env.Client, pod2, node)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("0")}, ExpectStateNodeExists(node).PodRequests())
})
It("should stop tracking nodes that are deleted", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod1)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectManualBinding(ctx, env.Client, pod1, node)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
cluster.ForEachNode(func(n *state.StateNode) bool {
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("2.5")}, n.Available())
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("1.5")}, n.PodRequests())
return true
})
// delete the node and the internal state should disappear as well
ExpectDeleted(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
cluster.ForEachNode(func(n *state.StateNode) bool {
Fail("shouldn't be called as the node was deleted")
return true
})
})
It("should track pods correctly if we miss events or they are consolidated", func() {
pod1 := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Name: "stateful-set-pod"},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
node1 := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod1, node1)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node1))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
ExpectManualBinding(ctx, env.Client, pod1, node1)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
cluster.ForEachNode(func(n *state.StateNode) bool {
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("2.5")}, n.Available())
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("1.5")}, n.PodRequests())
return true
})
ExpectDeleted(ctx, env.Client, pod1)
// second node has more capacity
node2 := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("8"),
}})
// and the pod can only bind to node2 due to the resource request
pod2 := test.UnschedulablePod(test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Name: "stateful-set-pod"},
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("5.0"),
}},
})
ExpectApplied(ctx, env.Client, pod2, node2)
ExpectManualBinding(ctx, env.Client, pod2, node2)
// we deleted the pod and then recreated it, but simulate only receiving an event for the new pod after it has
// bound, and never receiving the new node event at all
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node2))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod2))
cluster.ForEachNode(func(n *state.StateNode) bool {
if n.Node.Name == node1.Name {
// not on node1 any longer, so it should be fully free
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}, n.Available())
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("0")}, n.PodRequests())
} else {
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("3")}, n.Available())
ExpectResources(v1.ResourceList{v1.ResourceCPU: resource.MustParse("5")}, n.PodRequests())
}
return true
})
})
// nolint:gosec
It("should maintain a correct count of resource usage as pods are deleted/added", func() {
var pods []*v1.Pod
for i := 0; i < 100; i++ {
pods = append(pods, test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%1.1f", rand.Float64()*2)),
}},
}))
}
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("200"),
v1.ResourcePods: resource.MustParse("500"),
}})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
v1.ResourcePods: resource.MustParse("0"),
}, ExpectStateNodeExists(node).PodRequests())
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
sum := 0.0
podCount := 0
for _, pod := range pods {
ExpectApplied(ctx, env.Client, pod)
ExpectManualBinding(ctx, env.Client, pod, node)
podCount++
// extra reconciles shouldn't cause the pod's requests to be counted more than once
nReconciles := rand.Intn(3) + 1 // 1 to 3 reconciles
for i := 0; i < nReconciles; i++ {
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
}
sum += pod.Spec.Containers[0].Resources.Requests.Cpu().AsApproximateFloat64()
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%1.1f", sum)),
v1.ResourcePods: resource.MustParse(fmt.Sprintf("%d", podCount)),
}, ExpectStateNodeExists(node).PodRequests())
}
for _, pod := range pods {
ExpectDeleted(ctx, env.Client, pod)
nReconciles := rand.Intn(3) + 1
// nor should extra reconciles cause the requests to be subtracted more than once
for i := 0; i < nReconciles; i++ {
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
}
sum -= pod.Spec.Containers[0].Resources.Requests.Cpu().AsApproximateFloat64()
podCount--
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%1.1f", sum)),
v1.ResourcePods: resource.MustParse(fmt.Sprintf("%d", podCount)),
}, ExpectStateNodeExists(node).PodRequests())
}
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
v1.ResourcePods: resource.MustParse("0"),
}, ExpectStateNodeExists(node).PodRequests())
})
It("should track daemonset requested resources separately", func() {
ds := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi")}},
}},
)
ExpectApplied(ctx, env.Client, ds)
Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(ds), ds)).To(Succeed())
pod1 := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
})
dsPod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi"),
}},
})
dsPod.OwnerReferences = append(dsPod.OwnerReferences, metav1.OwnerReference{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: ds.Name,
UID: ds.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("8Gi"),
}})
ExpectApplied(ctx, env.Client, pod1, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectManualBinding(ctx, env.Client, pod1, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod1))
// daemonset pod isn't bound yet
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
v1.ResourceMemory: resource.MustParse("0"),
}, ExpectStateNodeExists(node).DaemonSetRequests())
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1.5"),
}, ExpectStateNodeExists(node).PodRequests())
ExpectApplied(ctx, env.Client, dsPod)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(dsPod))
ExpectManualBinding(ctx, env.Client, dsPod, node)
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(dsPod))
// just the DS request portion
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi"),
}, ExpectStateNodeExists(node).DaemonSetRequests())
// total request
ExpectResources(v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2.5"),
v1.ResourceMemory: resource.MustParse("2Gi"),
}, ExpectStateNodeExists(node).PodRequests())
})
It("should mark node for deletion when node is deleted", func() {
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
},
Finalizers: []string{v1alpha5.TerminationFinalizer},
},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}},
)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectNodeExists(ctx, env.Client, node.Name)
Expect(ExpectStateNodeExists(node).MarkedForDeletion()).To(BeTrue())
})
It("should mark node for deletion when machine is deleted", func() {
machine := test.Machine(v1alpha5.Machine{
ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{v1alpha5.TerminationFinalizer},
},
Spec: v1alpha5.MachineSpec{
Requirements: []v1.NodeSelectorRequirement{
{
Key: v1.LabelInstanceTypeStable,
Operator: v1.NodeSelectorOpIn,
Values: []string{cloudProvider.InstanceTypes[0].Name},
},
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-zone-1"},
},
},
MachineTemplateRef: &v1alpha5.MachineTemplateRef{
Name: "default",
},
},
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
Capacity: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("32Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("20Gi"),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("30Gi"),
v1.ResourceEphemeralStorage: resource.MustParse("18Gi"),
},
},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
},
Finalizers: []string{v1alpha5.TerminationFinalizer},
},
ProviderID: machine.Status.ProviderID,
})
ExpectApplied(ctx, env.Client, machine, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
Expect(env.Client.Delete(ctx, machine)).To(Succeed())
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectExists(ctx, env.Client, machine)
Expect(ExpectStateNodeExistsForMachine(machine).MarkedForDeletion()).To(BeTrue())
Expect(ExpectStateNodeExists(node).MarkedForDeletion()).To(BeTrue())
})
It("should nominate the node until the nomination time passes", func() {
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
},
Finalizers: []string{v1alpha5.TerminationFinalizer},
},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}},
)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
cluster.NominateNodeForPod(ctx, node.Name)
// Expect that the node is now nominated
Expect(ExpectStateNodeExists(node).Nominated()).To(BeTrue())
time.Sleep(time.Second * 5) // nomination window is 10s so it should still be nominated
Expect(ExpectStateNodeExists(node).Nominated()).To(BeTrue())
time.Sleep(time.Second * 6) // past 10s, node should no longer be nominated
Expect(ExpectStateNodeExists(node).Nominated()).To(BeFalse())
})
It("should handle a node changing from no providerID to registering a providerID", func() {
node := test.Node()
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
ExpectStateNodeExists(node)
// Change the providerID; this mocks CCM adding the providerID onto the node after registration
node.Spec.ProviderID = fmt.Sprintf("fake://%s", node.Name)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectStateNodeCount("==", 1)
ExpectStateNodeExists(node)
})
})
var _ = Describe("Pod Anti-Affinity", func() {
It("should track pods with required anti-affinity", func() {
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
PodAntiRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
TopologyKey: v1.LabelTopologyZone,
},
},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod)
ExpectApplied(ctx, env.Client, node)
ExpectManualBinding(ctx, env.Client, pod, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
foundPodCount := 0
cluster.ForPodsWithAntiAffinity(func(p *v1.Pod, n *v1.Node) bool {
foundPodCount++
Expect(p.Name).To(Equal(pod.Name))
return true
})
Expect(foundPodCount).To(BeNumerically("==", 1))
})
It("should not track pods with preferred anti-affinity", func() {
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
PodAntiPreferences: []v1.WeightedPodAffinityTerm{
{
Weight: 15,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
TopologyKey: v1.LabelTopologyZone,
},
},
},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod)
ExpectApplied(ctx, env.Client, node)
ExpectManualBinding(ctx, env.Client, pod, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
foundPodCount := 0
cluster.ForPodsWithAntiAffinity(func(p *v1.Pod, n *v1.Node) bool {
foundPodCount++
Fail("shouldn't track pods with preferred anti-affinity")
return true
})
Expect(foundPodCount).To(BeNumerically("==", 0))
})
It("should stop tracking pods with required anti-affinity if the pod is deleted", func() {
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
PodAntiRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
TopologyKey: v1.LabelTopologyZone,
},
},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod)
ExpectApplied(ctx, env.Client, node)
ExpectManualBinding(ctx, env.Client, pod, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
foundPodCount := 0
cluster.ForPodsWithAntiAffinity(func(p *v1.Pod, n *v1.Node) bool {
foundPodCount++
Expect(p.Name).To(Equal(pod.Name))
return true
})
Expect(foundPodCount).To(BeNumerically("==", 1))
ExpectDeleted(ctx, env.Client, client.Object(pod))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
foundPodCount = 0
cluster.ForPodsWithAntiAffinity(func(p *v1.Pod, n *v1.Node) bool {
foundPodCount++
Fail("should not be called as the pod was deleted")
return true
})
Expect(foundPodCount).To(BeNumerically("==", 0))
})
It("should handle events out of order", func() {
pod := test.UnschedulablePod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1.5"),
}},
PodAntiRequirements: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
TopologyKey: v1.LabelTopologyZone,
},
},
})
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: provisioner.Name,
v1.LabelInstanceTypeStable: cloudProvider.InstanceTypes[0].Name,
}},
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("4"),
}})
ExpectApplied(ctx, env.Client, pod)
ExpectApplied(ctx, env.Client, node)
ExpectManualBinding(ctx, env.Client, pod, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, podController, client.ObjectKeyFromObject(pod))
// simulate receiving the node deletion before the pod deletion
ExpectDeleted(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
foundPodCount := 0
cluster.ForPodsWithAntiAffinity(func(p *v1.Pod, n *v1.Node) bool {
foundPodCount++
return true
})
Expect(foundPodCount).To(BeNumerically("==", 0))
})
})
var _ = Describe("Provisioner Spec Updates", func() {
It("should cause consolidation state to change when a provisioner is updated", func() {
cluster.SetConsolidated(true)
fakeClock.Step(time.Minute)
provisioner.Spec.Consolidation = &v1alpha5.Consolidation{Enabled: ptr.Bool(true)}
ExpectApplied(ctx, env.Client, provisioner)
ExpectReconcileSucceeded(ctx, provisionerController, client.ObjectKeyFromObject(provisioner))
Expect(cluster.Consolidated()).To(BeFalse())
})
})
var _ = Describe("Cluster State Sync", func() {
It("should consider the cluster state synced when all nodes are tracked", func() {
// Deploy 1000 nodes and sync them all with the cluster
for i := 0; i < 1000; i++ {
node := test.Node(test.NodeOptions{
ProviderID: test.RandomProviderID(),
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
})
It("should consider the cluster state synced when nodes don't have provider id", func() {
// Deploy 1000 nodes and sync them all with the cluster
for i := 0; i < 1000; i++ {
node := test.Node()
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
})
It("should consider the cluster state synced when nodes register provider id", func() {
// Deploy 1000 nodes and sync them all with the cluster
var nodes []*v1.Node
for i := 0; i < 1000; i++ {
nodes = append(nodes, test.Node())
ExpectApplied(ctx, env.Client, nodes[i])
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(nodes[i]))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
for i := 0; i < 1000; i++ {
nodes[i].Spec.ProviderID = test.RandomProviderID()
ExpectApplied(ctx, env.Client, nodes[i])
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(nodes[i]))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
})
It("should consider the cluster state synced when all machines are tracked", func() {
// Deploy 1000 machines and sync them all with the cluster
for i := 0; i < 1000; i++ {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
})
It("should consider the cluster state synced when a combination of machines and nodes are tracked", func() {
// Deploy 250 nodes to the cluster that also have machines
for i := 0; i < 250; i++ {
node := test.Node(test.NodeOptions{
ProviderID: test.RandomProviderID(),
})
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: node.Spec.ProviderID,
},
})
ExpectApplied(ctx, env.Client, node, machine)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
}
// Deploy 250 nodes to the cluster
for i := 0; i < 250; i++ {
node := test.Node(test.NodeOptions{
ProviderID: test.RandomProviderID(),
})
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
}
// Deploy 500 machines and sync them all with the cluster
for i := 0; i < 500; i++ {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
})
It("should consider the cluster state synced when the representation of nodes is the same", func() {
// Deploy 500 machines with linked nodes and sync both representations with the cluster
for i := 0; i < 500; i++ {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
node := test.Node(test.NodeOptions{
ProviderID: machine.Status.ProviderID,
})
ExpectApplied(ctx, env.Client, machine)
ExpectApplied(ctx, env.Client, node)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
}
Expect(cluster.Synced(ctx)).To(BeTrue())
})
It("shouldn't consider the cluster state synced if a machine hasn't resolved its provider id", func() {
// Deploy 1000 machines and sync them all with the cluster
for i := 0; i < 1000; i++ {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
// One of them doesn't have its providerID
if i == 900 {
machine.Status.ProviderID = ""
}
ExpectApplied(ctx, env.Client, machine)
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
}
Expect(cluster.Synced(ctx)).To(BeFalse())
})
It("shouldn't consider the cluster state synced if a machine isn't tracked", func() {
// Deploy 1000 machines and sync them all with the cluster
for i := 0; i < 1000; i++ {
machine := test.Machine(v1alpha5.Machine{
Status: v1alpha5.MachineStatus{
ProviderID: test.RandomProviderID(),
},
})
ExpectApplied(ctx, env.Client, machine)
// One of them doesn't get synced with the reconciliation
if i != 900 {
ExpectReconcileSucceeded(ctx, machineController, client.ObjectKeyFromObject(machine))
}
}
Expect(cluster.Synced(ctx)).To(BeFalse())
})
It("shouldn't consider the cluster state synced if a node isn't tracked", func() {
// Deploy 1000 nodes and sync them all with the cluster
for i := 0; i < 1000; i++ {
node := test.Node(test.NodeOptions{
ProviderID: test.RandomProviderID(),
})
ExpectApplied(ctx, env.Client, node)
// One of them doesn't get synced with the reconciliation
if i != 900 {
ExpectReconcileSucceeded(ctx, nodeController, client.ObjectKeyFromObject(node))
}
}
Expect(cluster.Synced(ctx)).To(BeFalse())
})
})
var _ = Describe("DaemonSet Controller", func() {
It("should not update daemonsetCache when daemonset pod is not present", func() {
daemonset := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
)
ExpectApplied(ctx, env.Client, daemonset)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
daemonsetPod := cluster.GetDaemonSetPod(daemonset)
Expect(daemonsetPod).To(BeNil())
})
It("should update daemonsetCache when daemonset pod is created", func() {
daemonset := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
)
ExpectApplied(ctx, env.Client, daemonset)
daemonsetPod := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: daemonset.Name,
UID: daemonset.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
daemonsetPod.Spec = daemonset.Spec.Template.Spec
ExpectApplied(ctx, env.Client, daemonsetPod)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
Expect(cluster.GetDaemonSetPod(daemonset)).To(Equal(daemonsetPod))
})
It("should update daemonsetCache with the newest created pod", func() {
daemonset := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
)
ExpectApplied(ctx, env.Client, daemonset)
daemonsetPod1 := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: daemonset.Name,
UID: daemonset.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
daemonsetPod1.Spec = daemonset.Spec.Template.Spec
ExpectApplied(ctx, env.Client, daemonsetPod1)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
Expect(cluster.GetDaemonSetPod(daemonset)).To(Equal(daemonsetPod1))
daemonsetPod2 := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: daemonset.Name,
UID: daemonset.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
time.Sleep(time.Second) // Making sure the two pods have different creationTime
daemonsetPod2.Spec = daemonset.Spec.Template.Spec
ExpectApplied(ctx, env.Client, daemonsetPod2)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
Expect(cluster.GetDaemonSetPod(daemonset)).To(Equal(daemonsetPod2))
})
It("should delete daemonset in cache when daemonset is deleted", func() {
daemonset := test.DaemonSet(
test.DaemonSetOptions{PodOptions: test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}},
}},
)
ExpectApplied(ctx, env.Client, daemonset)
daemonsetPod := test.UnschedulablePod(
test.PodOptions{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "DaemonSet",
Name: daemonset.Name,
UID: daemonset.UID,
Controller: ptr.Bool(true),
BlockOwnerDeletion: ptr.Bool(true),
},
},
},
})
daemonsetPod.Spec = daemonset.Spec.Template.Spec
ExpectApplied(ctx, env.Client, daemonsetPod)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
Expect(cluster.GetDaemonSetPod(daemonset)).To(Equal(daemonsetPod))
ExpectDeleted(ctx, env.Client, daemonset, daemonsetPod)
ExpectReconcileSucceeded(ctx, daemonsetController, client.ObjectKeyFromObject(daemonset))
Expect(cluster.GetDaemonSetPod(daemonset)).To(BeNil())
})
})
var _ = Describe("Consolidated State", func() {
It("should update the consolidated value when setting consolidation", func() {
cluster.SetConsolidated(true)
Expect(cluster.Consolidated()).To(BeTrue())
cluster.SetConsolidated(false)
Expect(cluster.Consolidated()).To(BeFalse())
})
It("should update the consolidated value when consolidation timeout (5m) has passed and state hasn't changed", func() {
cluster.SetConsolidated(true)
fakeClock.Step(time.Minute)
Expect(cluster.Consolidated()).To(BeTrue())
fakeClock.Step(time.Minute * 3)
Expect(cluster.Consolidated()).To(BeTrue())
fakeClock.Step(time.Minute * 2)
Expect(cluster.Consolidated()).To(BeFalse())
})
})
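// ExpectStateNodeCount asserts the number of nodes tracked in cluster state against count using the given comparator and returns the observed count.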
func ExpectStateNodeCount(comparator string, count int) int {
c := 0
cluster.ForEachNode(func(n *state.StateNode) bool {
c++
return true
})
ExpectWithOffset(1, c).To(BeNumerically(comparator, count))
return c
}
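// ExpectStateNodeExistsWithOffset asserts that cluster state is tracking the given node and returns a deep copy of its state representation.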
func ExpectStateNodeExistsWithOffset(offset int, node *v1.Node) *state.StateNode {
var ret *state.StateNode
cluster.ForEachNode(func(n *state.StateNode) bool {
if n.Node.Name != node.Name {
return true
}
ret = n.DeepCopy()
return false
})
ExpectWithOffset(offset+1, ret).ToNot(BeNil())
return ret
}
func ExpectStateNodeExists(node *v1.Node) *state.StateNode {
return ExpectStateNodeExistsWithOffset(1, node)
}
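// ExpectStateNodeExistsForMachine asserts that cluster state is tracking the given machine and returns a deep copy of its state representation.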
func ExpectStateNodeExistsForMachine(machine *v1alpha5.Machine) *state.StateNode {
var ret *state.StateNode
cluster.ForEachNode(func(n *state.StateNode) bool {
if n.Machine.Name != machine.Name {
return true
}
ret = n.DeepCopy()
return false
})
ExpectWithOffset(1, ret).ToNot(BeNil())
return ret
}
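// ExpectStateNodeNotFoundForMachine asserts that cluster state is not tracking the given machine.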
func ExpectStateNodeNotFoundForMachine(machine *v1alpha5.Machine) *state.StateNode {
var ret *state.StateNode
cluster.ForEachNode(func(n *state.StateNode) bool {
if n.Machine.Name != machine.Name {
return true
}
ret = n.DeepCopy()
return false
})
ExpectWithOffset(1, ret).To(BeNil())
return ret
}
| 1,834 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//go:build !ignore_autogenerated
// Code generated by controller-gen. DO NOT EDIT.
package state
import (
"github.com/aws/karpenter-core/pkg/scheduling"
"k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StateNode) DeepCopyInto(out *StateNode) {
*out = *in
if in.Node != nil {
in, out := &in.Node, &out.Node
*out = new(v1.Node)
(*in).DeepCopyInto(*out)
}
if in.inflightAllocatable != nil {
in, out := &in.inflightAllocatable, &out.inflightAllocatable
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.inflightCapacity != nil {
in, out := &in.inflightCapacity, &out.inflightCapacity
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.startupTaints != nil {
in, out := &in.startupTaints, &out.startupTaints
*out = make([]v1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.daemonSetRequests != nil {
in, out := &in.daemonSetRequests, &out.daemonSetRequests
*out = make(map[types.NamespacedName]v1.ResourceList, len(*in))
for key, val := range *in {
var outVal map[v1.ResourceName]resource.Quantity
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
(*out)[key] = outVal
}
}
if in.daemonSetLimits != nil {
in, out := &in.daemonSetLimits, &out.daemonSetLimits
*out = make(map[types.NamespacedName]v1.ResourceList, len(*in))
for key, val := range *in {
var outVal map[v1.ResourceName]resource.Quantity
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
(*out)[key] = outVal
}
}
if in.podRequests != nil {
in, out := &in.podRequests, &out.podRequests
*out = make(map[types.NamespacedName]v1.ResourceList, len(*in))
for key, val := range *in {
var outVal map[v1.ResourceName]resource.Quantity
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
(*out)[key] = outVal
}
}
if in.podLimits != nil {
in, out := &in.podLimits, &out.podLimits
*out = make(map[types.NamespacedName]v1.ResourceList, len(*in))
for key, val := range *in {
var outVal map[v1.ResourceName]resource.Quantity
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make(v1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
(*out)[key] = outVal
}
}
if in.hostPortUsage != nil {
in, out := &in.hostPortUsage, &out.hostPortUsage
*out = (*in).DeepCopy()
}
if in.volumeUsage != nil {
in, out := &in.volumeUsage, &out.volumeUsage
*out = (*in).DeepCopy()
}
if in.volumeLimits != nil {
in, out := &in.volumeLimits, &out.volumeLimits
*out = make(scheduling.VolumeCount, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
in.nominatedUntil.DeepCopyInto(&out.nominatedUntil)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StateNode.
func (in *StateNode) DeepCopy() *StateNode {
if in == nil {
return nil
}
out := new(StateNode)
in.DeepCopyInto(out)
return out
}
| 152 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informer
import (
"context"
"time"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/controllers/state"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
)
// Controller reconciles daemonsets to keep the daemonset pod cache in cluster state up to date
type Controller struct {
kubeClient client.Client
cluster *state.Cluster
}
// NewDaemonSetController constructs a controller instance
func NewDaemonSetController(kubeClient client.Client, cluster *state.Cluster) corecontroller.Controller {
return &Controller{
kubeClient: kubeClient,
cluster: cluster,
}
}
func (c *Controller) Name() string {
return "daemonset"
}
// Reconcile the resource
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
daemonSet := appsv1.DaemonSet{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, &daemonSet); err != nil {
if errors.IsNotFound(err) {
// notify cluster state of the daemonset deletion
c.cluster.DeleteDaemonSet(req.NamespacedName)
}
return reconcile.Result{}, client.IgnoreNotFound(err)
}
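// Update the cached daemonset pod and requeue periodically so the cache stays current even without watch events.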
return reconcile.Result{RequeueAfter: time.Minute}, c.cluster.UpdateDaemonSet(ctx, &daemonSet)
}
func (c *Controller) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&appsv1.DaemonSet{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}),
)
}
| 72 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informer
import (
"context"
"k8s.io/apimachinery/pkg/api/errors"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/state"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
)
// MachineController reconciles machines for the purpose of maintaining state.
type MachineController struct {
kubeClient client.Client
cluster *state.Cluster
}
// NewMachineController constructs a controller instance
func NewMachineController(kubeClient client.Client, cluster *state.Cluster) corecontroller.Controller {
return &MachineController{
kubeClient: kubeClient,
cluster: cluster,
}
}
func (c *MachineController) Name() string {
return "machine-state"
}
func (c *MachineController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("machine", req.NamespacedName.Name))
machine := &v1alpha5.Machine{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, machine); err != nil {
if errors.IsNotFound(err) {
// notify cluster state of the machine deletion
c.cluster.DeleteMachine(req.Name)
}
return reconcile.Result{}, client.IgnoreNotFound(err)
}
c.cluster.UpdateMachine(machine)
// Requeue periodically so cluster state stays in sync with this machine even if watch events are missed
return reconcile.Result{RequeueAfter: stateRetryPeriod}, nil
}
func (c *MachineController) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1alpha5.Machine{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}))
}
| 72 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informer
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/controllers/state"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
)
// NodeController reconciles nodes for the purpose of maintaining state regarding nodes that is expensive to compute.
type NodeController struct {
kubeClient client.Client
cluster *state.Cluster
}
// NewNodeController constructs a controller instance
func NewNodeController(kubeClient client.Client, cluster *state.Cluster) corecontroller.Controller {
return &NodeController{
kubeClient: kubeClient,
cluster: cluster,
}
}
func (c *NodeController) Name() string {
return "node_state"
}
func (c *NodeController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("node", req.NamespacedName.Name))
node := &v1.Node{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, node); err != nil {
if errors.IsNotFound(err) {
// notify cluster state of the node deletion
c.cluster.DeleteNode(req.Name)
}
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if err := c.cluster.UpdateNode(ctx, node); err != nil {
return reconcile.Result{}, err
}
// Requeue periodically so cluster state stays aware of this node even if watch events are missed
return reconcile.Result{RequeueAfter: stateRetryPeriod}, nil
}
func (c *NodeController) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1.Node{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}))
}
| 74 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informer
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/controllers/state"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
)
var stateRetryPeriod = 1 * time.Minute
// PodController reconciles pods for the purpose of maintaining state regarding pods that is expensive to compute.
type PodController struct {
kubeClient client.Client
cluster *state.Cluster
}
func NewPodController(kubeClient client.Client, cluster *state.Cluster) corecontroller.Controller {
return &PodController{
kubeClient: kubeClient,
cluster: cluster,
}
}
func (c *PodController) Name() string {
return "pod_state"
}
func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("pod", req.NamespacedName))
pod := &v1.Pod{}
if err := c.kubeClient.Get(ctx, req.NamespacedName, pod); err != nil {
if errors.IsNotFound(err) {
// notify cluster state of the pod deletion
c.cluster.DeletePod(req.NamespacedName)
}
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if err := c.cluster.UpdatePod(ctx, pod); err != nil {
// We requeue here since the NotFound error is from finding the node for the binding
if errors.IsNotFound(err) {
return reconcile.Result{Requeue: true}, nil
}
return reconcile.Result{}, err
}
return reconcile.Result{RequeueAfter: stateRetryPeriod}, nil
}
func (c *PodController) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1.Pod{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}))
}
| 79 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package informer
import (
"context"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/controllers/state"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
)
var _ corecontroller.TypedController[*v1alpha5.Provisioner] = (*ProvisionerController)(nil)
// ProvisionerController reconciles provisioners to re-trigger consolidation on change.
type ProvisionerController struct {
kubeClient client.Client
cluster *state.Cluster
}
func NewProvisionerController(kubeClient client.Client, cluster *state.Cluster) corecontroller.Controller {
return corecontroller.Typed[*v1alpha5.Provisioner](kubeClient, &ProvisionerController{
kubeClient: kubeClient,
cluster: cluster,
})
}
func (c *ProvisionerController) Name() string {
return "provisioner_state"
}
func (c *ProvisionerController) Reconcile(_ context.Context, _ *v1alpha5.Provisioner) (reconcile.Result, error) {
// Something changed in the provisioner so we should re-consider consolidation
c.cluster.SetConsolidated(false)
return reconcile.Result{}, nil
}
func (c *ProvisionerController) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1alpha5.Provisioner{}).
WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
WithEventFilter(predicate.GenerationChangedPredicate{}).
WithEventFilter(predicate.Funcs{DeleteFunc: func(event event.DeleteEvent) bool { return false }}),
)
}
| 67 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination
import (
"context"
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/client-go/util/workqueue"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider"
"github.com/aws/karpenter-core/pkg/controllers/termination/terminator"
terminatorevents "github.com/aws/karpenter-core/pkg/controllers/termination/terminator/events"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/metrics"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
machineutil "github.com/aws/karpenter-core/pkg/utils/machine"
)
var _ corecontroller.FinalizingTypedController[*v1.Node] = (*Controller)(nil)
// Controller terminates nodes that carry the termination finalizer by draining their pods and deleting the backing cloud provider machine
type Controller struct {
kubeClient client.Client
cloudProvider cloudprovider.CloudProvider
terminator *terminator.Terminator
recorder events.Recorder
}
// NewController constructs a controller instance
func NewController(kubeClient client.Client, cloudProvider cloudprovider.CloudProvider, terminator *terminator.Terminator, recorder events.Recorder) corecontroller.Controller {
return corecontroller.Typed[*v1.Node](kubeClient, &Controller{
kubeClient: kubeClient,
cloudProvider: cloudProvider,
terminator: terminator,
recorder: recorder,
})
}
func (c *Controller) Name() string {
return "termination"
}
func (c *Controller) Reconcile(_ context.Context, _ *v1.Node) (reconcile.Result, error) {
return reconcile.Result{}, nil
}
//nolint:gocyclo
func (c *Controller) Finalize(ctx context.Context, node *v1.Node) (reconcile.Result, error) {
if !controllerutil.ContainsFinalizer(node, v1alpha5.TerminationFinalizer) {
return reconcile.Result{}, nil
}
if err := c.deleteAllMachines(ctx, node); err != nil {
return reconcile.Result{}, fmt.Errorf("deleting machines, %w", err)
}
if err := c.terminator.Cordon(ctx, node); err != nil {
return reconcile.Result{}, fmt.Errorf("cordoning node, %w", err)
}
if err := c.terminator.Drain(ctx, node); err != nil {
if !terminator.IsNodeDrainError(err) {
return reconcile.Result{}, fmt.Errorf("draining node, %w", err)
}
c.recorder.Publish(terminatorevents.NodeFailedToDrain(node, err))
// If the underlying machine no longer exists, remove the finalizer so the node can be deleted.
if _, err := c.cloudProvider.Get(ctx, node.Spec.ProviderID); err != nil {
if cloudprovider.IsMachineNotFoundError(err) {
return reconcile.Result{}, c.removeFinalizer(ctx, node)
}
return reconcile.Result{}, fmt.Errorf("getting machine, %w", err)
}
return reconcile.Result{RequeueAfter: 1 * time.Second}, nil
}
if err := c.cloudProvider.Delete(ctx, machineutil.NewFromNode(node)); cloudprovider.IgnoreMachineNotFoundError(err) != nil {
return reconcile.Result{}, fmt.Errorf("terminating cloudprovider instance, %w", err)
}
return reconcile.Result{}, c.removeFinalizer(ctx, node)
}
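// deleteAllMachines deletes every machine whose provider ID matches the node being terminated.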
func (c *Controller) deleteAllMachines(ctx context.Context, node *v1.Node) error {
machineList := &v1alpha5.MachineList{}
if err := c.kubeClient.List(ctx, machineList, client.MatchingFields{"status.providerID": node.Spec.ProviderID}); err != nil {
return err
}
for i := range machineList.Items {
if err := c.kubeClient.Delete(ctx, &machineList.Items[i]); err != nil {
return client.IgnoreNotFound(err)
}
}
return nil
}
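// removeFinalizer removes the termination finalizer from the node and, if the patch succeeds, records the termination metrics.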
func (c *Controller) removeFinalizer(ctx context.Context, n *v1.Node) error {
stored := n.DeepCopy()
controllerutil.RemoveFinalizer(n, v1alpha5.TerminationFinalizer)
if !equality.Semantic.DeepEqual(stored, n) {
if err := c.kubeClient.Patch(ctx, n, client.MergeFrom(stored)); err != nil {
return client.IgnoreNotFound(fmt.Errorf("patching node, %w", err))
}
metrics.NodesTerminatedCounter.With(prometheus.Labels{
metrics.ProvisionerLabel: n.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Inc()
// We use stored.DeletionTimestamp since the api-server may give back a node after the patch without a deletionTimestamp
TerminationSummary.With(prometheus.Labels{
metrics.ProvisionerLabel: n.Labels[v1alpha5.ProvisionerNameLabelKey],
}).Observe(time.Since(stored.DeletionTimestamp.Time).Seconds())
logging.FromContext(ctx).Infof("deleted node")
}
return nil
}
func (c *Controller) Builder(_ context.Context, m manager.Manager) corecontroller.Builder {
return corecontroller.Adapt(controllerruntime.
NewControllerManagedBy(m).
For(&v1.Node{}).
WithOptions(
controller.Options{
RateLimiter: workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 10*time.Second),
// 10 qps, 100 bucket size
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
),
MaxConcurrentReconciles: 100,
},
))
}
| 152 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination
import (
"github.com/prometheus/client_golang/prometheus"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"github.com/aws/karpenter-core/pkg/metrics"
)
var (
TerminationSummary = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Namespace: "karpenter",
Subsystem: "nodes",
Name: "termination_time_seconds",
Help: "The time taken between a node's deletion request and the removal of its finalizer",
Objectives: metrics.SummaryObjectives(),
},
[]string{metrics.ProvisionerLabel},
)
)
func init() {
crmetrics.Registry.MustRegister(TerminationSummary)
}
| 40 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package termination_test
import (
"context"
"fmt"
"sync"
"testing"
"time"
"k8s.io/client-go/tools/record"
clock "k8s.io/utils/clock/testing"
"sigs.k8s.io/controller-runtime/pkg/cache"
"github.com/samber/lo"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/cloudprovider/fake"
"github.com/aws/karpenter-core/pkg/controllers/termination"
"github.com/aws/karpenter-core/pkg/controllers/termination/terminator"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/metrics"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var terminationController controller.Controller
var evictionQueue *terminator.EvictionQueue
var env *test.Environment
var defaultOwnerRefs = []metav1.OwnerReference{{Kind: "ReplicaSet", APIVersion: "apps/v1", Name: "rs", UID: "1234567890"}}
var fakeClock *clock.FakeClock
var cloudProvider *fake.CloudProvider
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Termination")
}
var _ = BeforeSuite(func() {
fakeClock = clock.NewFakeClock(time.Now())
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...), test.WithFieldIndexers(func(c cache.Cache) error {
return c.IndexField(ctx, &v1alpha5.Machine{}, "status.providerID", func(obj client.Object) []string {
return []string{obj.(*v1alpha5.Machine).Status.ProviderID}
})
}))
cloudProvider = fake.NewCloudProvider()
evictionQueue = terminator.NewEvictionQueue(ctx, env.KubernetesInterface.CoreV1(), events.NewRecorder(&record.FakeRecorder{}))
terminationController = termination.NewController(env.Client, cloudProvider, terminator.NewTerminator(fakeClock, env.Client, evictionQueue), events.NewRecorder(&record.FakeRecorder{}))
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed(), "Failed to stop environment")
})
var _ = Describe("Termination", func() {
var node *v1.Node
var machine *v1alpha5.Machine
BeforeEach(func() {
machine, node = test.MachineAndNode(v1alpha5.Machine{ObjectMeta: metav1.ObjectMeta{Finalizers: []string{v1alpha5.TerminationFinalizer}}})
cloudProvider.CreatedMachines[node.Spec.ProviderID] = machine
})
AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
fakeClock.SetTime(time.Now())
cloudProvider.Reset()
// Reset the metrics collectors
metrics.NodesTerminatedCounter.Reset()
termination.TerminationSummary.Reset()
})
Context("Reconciliation", func() {
It("should delete nodes", func() {
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should delete machines associated with nodes", func() {
ExpectApplied(ctx, env.Client, node, machine)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectExists(ctx, env.Client, machine)
ExpectFinalizersRemoved(ctx, env.Client, machine)
ExpectNotFound(ctx, env.Client, node, machine)
})
It("should not race if deleting nodes in parallel", func() {
var nodes []*v1.Node
for i := 0; i < 10; i++ {
node = test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{v1alpha5.TerminationFinalizer},
},
})
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
nodes = append(nodes, node)
}
var wg sync.WaitGroup
// this is enough to trip the race detector
for i := 0; i < 10; i++ {
wg.Add(1)
go func(node *v1.Node) {
defer GinkgoRecover()
defer wg.Done()
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
}(nodes[i])
}
wg.Wait()
ExpectNotFound(ctx, env.Client, lo.Map(nodes, func(n *v1.Node, _ int) client.Object { return n })...)
})
It("should exclude nodes from load balancers when terminating", func() {
// This is a kludge to prevent the node from being deleted before we can
// inspect its labels
podNoEvict := test.Pod(test.PodOptions{
NodeName: node.Name,
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{v1alpha5.DoNotEvictPodAnnotationKey: "true"},
OwnerReferences: defaultOwnerRefs,
},
})
ExpectApplied(ctx, env.Client, node, podNoEvict)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Labels[v1.LabelNodeExcludeBalancers]).Should(Equal("karpenter"))
})
It("should not evict pods that tolerate unschedulable taint", func() {
podEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
podSkip := test.Pod(test.PodOptions{
NodeName: node.Name,
Tolerations: []v1.Toleration{{Key: v1.TaintNodeUnschedulable, Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}},
ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs},
})
ExpectApplied(ctx, env.Client, node, podEvict, podSkip)
// Trigger Termination Controller
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect node to exist and be draining
ExpectNodeDraining(env.Client, node.Name)
// Expect podEvict to be evicting, and delete it
ExpectEvicted(env.Client, podEvict)
ExpectDeleted(ctx, env.Client, podEvict)
// Reconcile to delete node
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should delete nodes that have pods without an ownerRef", func() {
pod := test.Pod(test.PodOptions{
NodeName: node.Name,
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: nil,
},
})
ExpectApplied(ctx, env.Client, node, pod)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect pod with no owner ref to be enqueued for eviction
ExpectEvicted(env.Client, pod)
// Expect node to exist and be draining
ExpectNodeDraining(env.Client, node.Name)
// Delete no owner refs pod to simulate successful eviction
ExpectDeleted(ctx, env.Client, pod)
// Reconcile node to evict pod
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Reconcile to delete node
ExpectNotFound(ctx, env.Client, node)
})
It("should delete nodes with terminal pods", func() {
podEvictPhaseSucceeded := test.Pod(test.PodOptions{
NodeName: node.Name,
Phase: v1.PodSucceeded,
})
podEvictPhaseFailed := test.Pod(test.PodOptions{
NodeName: node.Name,
Phase: v1.PodFailed,
})
ExpectApplied(ctx, env.Client, node, podEvictPhaseSucceeded, podEvictPhaseFailed)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
// Trigger Termination Controller, which should ignore these pods and delete the node
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should fail to evict pods that violate a PDB", func() {
minAvailable := intstr.FromInt(1)
labelSelector := map[string]string{test.RandomName(): test.RandomName()}
pdb := test.PodDisruptionBudget(test.PDBOptions{
Labels: labelSelector,
// Don't let any pod evict
MinAvailable: &minAvailable,
})
podNoEvict := test.Pod(test.PodOptions{
NodeName: node.Name,
ObjectMeta: metav1.ObjectMeta{
Labels: labelSelector,
OwnerReferences: defaultOwnerRefs,
},
Phase: v1.PodRunning,
})
ExpectApplied(ctx, env.Client, node, podNoEvict, pdb)
// Trigger Termination Controller
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect node to exist and be draining
ExpectNodeDraining(env.Client, node.Name)
// Expect podNoEvict to fail eviction due to PDB, and be retried
Eventually(func() int {
return evictionQueue.NumRequeues(client.ObjectKeyFromObject(podNoEvict))
}).Should(BeNumerically(">=", 1))
// Delete pod to simulate successful eviction
ExpectDeleted(ctx, env.Client, podNoEvict)
ExpectNotFound(ctx, env.Client, podNoEvict)
// Reconcile to delete node
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should evict non-critical pods first", func() {
podEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
podNodeCritical := test.Pod(test.PodOptions{NodeName: node.Name, PriorityClassName: "system-node-critical", ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
podClusterCritical := test.Pod(test.PodOptions{NodeName: node.Name, PriorityClassName: "system-cluster-critical", ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
ExpectApplied(ctx, env.Client, node, podEvict, podNodeCritical, podClusterCritical)
// Trigger Termination Controller
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect node to exist and be draining
ExpectNodeDraining(env.Client, node.Name)
// Expect podEvict to be evicting, and delete it
ExpectEvicted(env.Client, podEvict)
ExpectDeleted(ctx, env.Client, podEvict)
// Expect the critical pods to be evicted and deleted
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectEvicted(env.Client, podNodeCritical)
ExpectDeleted(ctx, env.Client, podNodeCritical)
ExpectEvicted(env.Client, podClusterCritical)
ExpectDeleted(ctx, env.Client, podClusterCritical)
// Reconcile to delete node
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should not evict static pods", func() {
ExpectApplied(ctx, env.Client, node)
podEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
ExpectApplied(ctx, env.Client, node, podEvict)
podNoEvict := test.Pod(test.PodOptions{
NodeName: node.Name,
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "v1",
Kind: "Node",
Name: node.Name,
UID: node.UID,
}},
},
})
ExpectApplied(ctx, env.Client, podNoEvict)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect mirror pod to not be queued for eviction
ExpectNotEnqueuedForEviction(evictionQueue, podNoEvict)
// Expect podEvict to be enqueued for eviction then be successful
ExpectEvicted(env.Client, podEvict)
// Expect node to exist and be draining
ExpectNodeDraining(env.Client, node.Name)
// Reconcile node to evict pod
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Delete pod to simulate successful eviction
ExpectDeleted(ctx, env.Client, podEvict)
// Reconcile to delete node
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should not delete nodes until all pods are deleted", func() {
pods := []*v1.Pod{
test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}}),
test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}}),
}
ExpectApplied(ctx, env.Client, node, pods[0], pods[1])
// Trigger Termination Controller
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect the pods to be evicted
ExpectEvicted(env.Client, pods[0], pods[1])
// Expect node to exist and be draining, but not deleted
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNodeDraining(env.Client, node.Name)
ExpectDeleted(ctx, env.Client, pods[1])
// Expect node to exist and be draining, but not deleted
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNodeDraining(env.Client, node.Name)
ExpectDeleted(ctx, env.Client, pods[0])
// Reconcile to delete node
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should delete nodes with no underlying machine even if not fully drained", func() {
pods := []*v1.Pod{
test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}}),
test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}}),
}
ExpectApplied(ctx, env.Client, node, pods[0], pods[1])
// Trigger Termination Controller
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
// Expect the pods to be evicted
ExpectEvicted(env.Client, pods[0], pods[1])
// Expect node to exist and be draining, but not deleted
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNodeDraining(env.Client, node.Name)
// After this, the node still has one pod that is evicting.
ExpectDeleted(ctx, env.Client, pods[1])
// Remove the node from created machines so that the cloud provider returns DNE
cloudProvider.CreatedMachines = map[string]*v1alpha5.Machine{}
// Reconcile to delete node
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
It("should wait for pods to terminate", func() {
pod := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
fakeClock.SetTime(time.Now()) // make our fake clock match the pod creation time
ExpectApplied(ctx, env.Client, node, pod)
// Before grace period, node should not delete
Expect(env.Client.Delete(ctx, node)).To(Succeed())
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNodeExists(ctx, env.Client, node.Name)
ExpectEvicted(env.Client, pod)
// After grace period, node should delete. The deletion timestamps are from etcd which we can't control, so
// to eliminate test-flakiness we reset the time to current time + 90 seconds instead of just advancing
// the clock by 90 seconds.
fakeClock.SetTime(time.Now().Add(90 * time.Second))
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
ExpectNotFound(ctx, env.Client, node)
})
})
Context("Metrics", func() {
It("should fire the terminationSummary metric when deleting nodes", func() {
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
m, ok := FindMetricWithLabelValues("karpenter_nodes_termination_time_seconds", map[string]string{"provisioner": ""})
Expect(ok).To(BeTrue())
Expect(m.GetSummary().GetSampleCount()).To(BeNumerically("==", 1))
})
It("should fire the nodesTerminated counter metric when deleting nodes", func() {
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, terminationController, client.ObjectKeyFromObject(node))
m, ok := FindMetricWithLabelValues("karpenter_nodes_terminated", map[string]string{"provisioner": ""})
Expect(ok).To(BeTrue())
Expect(lo.FromPtr(m.GetCounter().Value)).To(BeNumerically("==", 1))
})
})
})
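// ExpectNotEnqueuedForEviction asserts that none of the given pods are present in the eviction queue.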
func ExpectNotEnqueuedForEviction(e *terminator.EvictionQueue, pods ...*v1.Pod) {
for _, pod := range pods {
ExpectWithOffset(1, e.Contains(client.ObjectKeyFromObject(pod))).To(BeFalse())
}
}
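// ExpectEvicted waits for each pod to receive a deletion timestamp, indicating that its eviction has been initiated.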
func ExpectEvicted(c client.Client, pods ...*v1.Pod) {
for _, pod := range pods {
EventuallyWithOffset(1, func() bool {
return ExpectPodExists(ctx, c, pod.Name, pod.Namespace).GetDeletionTimestamp().IsZero()
}, ReconcilerPropagationTime, RequestInterval).Should(BeFalse(), func() string {
return fmt.Sprintf("expected %s/%s to be evicting, but it isn't", pod.Namespace, pod.Name)
})
}
}
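// ExpectNodeDraining asserts that the node is cordoned, still carries the termination finalizer, and has a deletion timestamp, then returns it.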
func ExpectNodeDraining(c client.Client, nodeName string) *v1.Node {
node := ExpectNodeExistsWithOffset(1, ctx, c, nodeName)
ExpectWithOffset(1, node.Spec.Unschedulable).To(BeTrue())
ExpectWithOffset(1, lo.Contains(node.Finalizers, v1alpha5.TerminationFinalizer)).To(BeTrue())
ExpectWithOffset(1, node.DeletionTimestamp.IsZero()).To(BeFalse())
return node
}
| 485 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package terminator
import (
"context"
"errors"
"fmt"
"time"
set "github.com/deckarep/golang-set"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/workqueue"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
terminatorevents "github.com/aws/karpenter-core/pkg/controllers/termination/terminator/events"
"github.com/aws/karpenter-core/pkg/events"
)
const (
evictionQueueBaseDelay = 100 * time.Millisecond
evictionQueueMaxDelay = 10 * time.Second
)
type NodeDrainError struct {
error
}
func NewNodeDrainError(err error) *NodeDrainError {
return &NodeDrainError{error: err}
}
func IsNodeDrainError(err error) bool {
if err == nil {
return false
}
var nodeDrainErr *NodeDrainError
return errors.As(err, &nodeDrainErr)
}
type EvictionQueue struct {
workqueue.RateLimitingInterface
set.Set
coreV1Client corev1.CoreV1Interface
recorder events.Recorder
}
func NewEvictionQueue(ctx context.Context, coreV1Client corev1.CoreV1Interface, recorder events.Recorder) *EvictionQueue {
queue := &EvictionQueue{
RateLimitingInterface: workqueue.NewRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(evictionQueueBaseDelay, evictionQueueMaxDelay)),
Set: set.NewSet(),
coreV1Client: coreV1Client,
recorder: recorder,
}
go queue.Start(logging.WithLogger(ctx, logging.FromContext(ctx).Named("eviction")))
return queue
}
// Add adds pods to the EvictionQueue
func (e *EvictionQueue) Add(pods ...*v1.Pod) {
for _, pod := range pods {
if nn := client.ObjectKeyFromObject(pod); !e.Set.Contains(nn) {
e.Set.Add(nn)
e.RateLimitingInterface.Add(nn)
}
}
}
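// Start processes the queue, evicting one pod at a time and requeueing failed evictions with rate-limited backoff.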
func (e *EvictionQueue) Start(ctx context.Context) {
for {
// Get pod from queue. This waits until queue is non-empty.
item, shutdown := e.RateLimitingInterface.Get()
if shutdown {
break
}
nn := item.(types.NamespacedName)
// Evict pod
if e.evict(ctx, nn) {
e.RateLimitingInterface.Forget(nn)
e.Set.Remove(nn)
e.RateLimitingInterface.Done(nn)
continue
}
e.RateLimitingInterface.Done(nn)
// Requeue pod if eviction failed
e.RateLimitingInterface.AddRateLimited(nn)
}
logging.FromContext(ctx).Errorf("EvictionQueue is broken and has shutdown")
}
// evict returns true if the pod was evicted or no longer exists, and false if the eviction failed and should be retried
func (e *EvictionQueue) evict(ctx context.Context, nn types.NamespacedName) bool {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("pod", nn))
err := e.coreV1Client.Pods(nn.Namespace).Evict(ctx, &v1beta1.Eviction{
ObjectMeta: metav1.ObjectMeta{Name: nn.Name, Namespace: nn.Namespace},
})
// status codes for the eviction API are defined here:
// https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/#how-api-initiated-eviction-works
if apierrors.IsNotFound(err) { // 404
return true
}
if apierrors.IsTooManyRequests(err) { // 429 - PDB violation
e.recorder.Publish(terminatorevents.NodeFailedToDrain(&v1.Node{ObjectMeta: metav1.ObjectMeta{
Name: nn.Name,
Namespace: nn.Namespace,
}}, fmt.Errorf("evicting pod %s/%s violates a PDB", nn.Namespace, nn.Name)))
return false
}
if err != nil {
logging.FromContext(ctx).Errorf("evicting pod, %s", err)
return false
}
e.recorder.Publish(terminatorevents.EvictPod(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: nn.Name, Namespace: nn.Namespace}}))
return true
}
| 135 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package terminator
import (
"context"
"fmt"
"time"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
podutil "github.com/aws/karpenter-core/pkg/utils/pod"
)
type Terminator struct {
clock clock.Clock
kubeClient client.Client
evictionQueue *EvictionQueue
}
func NewTerminator(clk clock.Clock, kubeClient client.Client, eq *EvictionQueue) *Terminator {
return &Terminator{
clock: clk,
kubeClient: kubeClient,
evictionQueue: eq,
}
}
// Cordon cordons a node
func (t *Terminator) Cordon(ctx context.Context, node *v1.Node) error {
stored := node.DeepCopy()
node.Spec.Unschedulable = true
// Adding this label to the node ensures that the node is removed from the load-balancer target group
// while it is draining and before it is terminated. This prevents 5xx errors that would otherwise occur
// before the load balancer controller has determined that the node and its underlying connections are gone
// https://github.com/aws/aws-node-termination-handler/issues/316
// https://github.com/aws/karpenter/pull/2518
node.Labels = lo.Assign(node.Labels, map[string]string{
v1.LabelNodeExcludeBalancers: "karpenter",
})
if !equality.Semantic.DeepEqual(node, stored) {
if err := t.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil {
return err
}
logging.FromContext(ctx).Infof("cordoned node")
}
return nil
}
// Drain evicts pods from the node and returns nil once all evictable pods have been evicted
// https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown
func (t *Terminator) Drain(ctx context.Context, node *v1.Node) error {
// Get evictable pods
pods, err := t.getPods(ctx, node)
if err != nil {
return fmt.Errorf("listing pods for node, %w", err)
}
var podsToEvict []*v1.Pod
// Filter out pods that do not need to be evicted
for _, p := range pods {
// Ignore if unschedulable is tolerated, since they will reschedule
if podutil.ToleratesUnschedulableTaint(p) {
continue
}
// Ignore static mirror pods
if podutil.IsOwnedByNode(p) {
continue
}
podsToEvict = append(podsToEvict, p)
}
// Enqueue for eviction
t.evict(podsToEvict)
if len(podsToEvict) > 0 {
return NewNodeDrainError(fmt.Errorf("%d pods are waiting to be evicted", len(podsToEvict)))
}
return nil
}
// getPods returns a list of evictable pods for the node
func (t *Terminator) getPods(ctx context.Context, node *v1.Node) ([]*v1.Pod, error) {
podList := &v1.PodList{}
if err := t.kubeClient.List(ctx, podList, client.MatchingFields{"spec.nodeName": node.Name}); err != nil {
return nil, fmt.Errorf("listing pods on node, %w", err)
}
var pods []*v1.Pod
for _, p := range podList.Items {
// Ignore if the pod is complete and doesn't need to be evicted
if podutil.IsTerminal(lo.ToPtr(p)) {
continue
}
// Ignore if kubelet is partitioned and pods are beyond graceful termination window
if t.isStuckTerminating(lo.ToPtr(p)) {
continue
}
pods = append(pods, lo.ToPtr(p))
}
return pods, nil
}
func (t *Terminator) evict(pods []*v1.Pod) {
// 1. Prioritize noncritical pods https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown
critical := []*v1.Pod{}
nonCritical := []*v1.Pod{}
for _, pod := range pods {
if !pod.DeletionTimestamp.IsZero() {
continue
}
if pod.Spec.PriorityClassName == "system-cluster-critical" || pod.Spec.PriorityClassName == "system-node-critical" {
critical = append(critical, pod)
} else {
nonCritical = append(nonCritical, pod)
}
}
// 2. Evict critical pods if all noncritical are evicted
if len(nonCritical) == 0 {
t.evictionQueue.Add(critical...)
} else {
t.evictionQueue.Add(nonCritical...)
}
}
func (t *Terminator) isStuckTerminating(pod *v1.Pod) bool {
if pod.DeletionTimestamp == nil {
return false
}
return t.clock.Now().After(pod.DeletionTimestamp.Time.Add(1 * time.Minute))
}
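// exampleCordonAndDrain is an illustrative sketch (not part of the original source): callers
// typically cordon the node first and then call Drain repeatedly until it stops returning a
// NodeDrainError, i.e. until every evictable pod has been enqueued and has terminated.
func exampleCordonAndDrain(ctx context.Context, t *Terminator, node *v1.Node) error {
	if err := t.Cordon(ctx, node); err != nil {
		return fmt.Errorf("cordoning node, %w", err)
	}
	return t.Drain(ctx, node) // non-nil while pods are still waiting to be evicted
}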
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"fmt"
v1 "k8s.io/api/core/v1"
"github.com/aws/karpenter-core/pkg/events"
)
func EvictPod(pod *v1.Pod) events.Event {
return events.Event{
InvolvedObject: pod,
Type: v1.EventTypeNormal,
Reason: "Evicted",
Message: "Evicted pod",
DedupeValues: []string{pod.Name},
}
}
func NodeFailedToDrain(node *v1.Node, err error) events.Event {
return events.Event{
InvolvedObject: node,
Type: v1.EventTypeWarning,
Reason: "FailedDraining",
Message: fmt.Sprintf("Failed to drain node, %s", err),
DedupeValues: []string{node.Name},
}
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"fmt"
"strings"
"time"
"github.com/patrickmn/go-cache"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
)
type Event struct {
InvolvedObject runtime.Object
Type string
Reason string
Message string
DedupeValues []string
DedupeTimeout time.Duration
RateLimiter flowcontrol.RateLimiter
}
func (e Event) dedupeKey() string {
return fmt.Sprintf("%s-%s",
strings.ToLower(e.Reason),
strings.Join(e.DedupeValues, "-"),
)
}
type Recorder interface {
Publish(...Event)
}
type recorder struct {
rec record.EventRecorder
cache *cache.Cache
}
const defaultDedupeTimeout = 2 * time.Minute
func NewRecorder(r record.EventRecorder) Recorder {
return &recorder{
rec: r,
cache: cache.New(defaultDedupeTimeout, 10*time.Second),
}
}
// Publish creates a Kubernetes event using the passed event struct
func (r *recorder) Publish(evts ...Event) {
for _, evt := range evts {
r.publishEvent(evt)
}
}
func (r *recorder) publishEvent(evt Event) {
// Override the timeout if one is set for an event
timeout := defaultDedupeTimeout
if evt.DedupeTimeout != 0 {
timeout = evt.DedupeTimeout
}
// Dedupe identical events that involve the same object and occur close together
if len(evt.DedupeValues) > 0 && !r.shouldCreateEvent(evt.dedupeKey(), timeout) {
return
}
// If the event is rate-limited, then validate we should create the event
if evt.RateLimiter != nil && !evt.RateLimiter.TryAccept() {
return
}
r.rec.Event(evt.InvolvedObject, evt.Type, evt.Reason, evt.Message)
}
func (r *recorder) shouldCreateEvent(key string, timeout time.Duration) bool {
if _, exists := r.cache.Get(key); exists {
return false
}
r.cache.Set(key, nil, timeout)
return true
}
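// examplePublish is an illustrative sketch (not part of the original source) showing how a caller
// can override the dedupe window and attach a per-event rate limiter before publishing. The obj
// argument stands in for whatever runtime.Object the event involves.
func examplePublish(r Recorder, obj runtime.Object) {
	r.Publish(Event{
		InvolvedObject: obj,
		Type:           "Normal",
		Reason:         "ExampleReason",
		Message:        "illustrative event",
		DedupeValues:   []string{"example"},
		DedupeTimeout:  30 * time.Second,                            // overrides defaultDedupeTimeout for this event
		RateLimiter:    flowcontrol.NewTokenBucketRateLimiter(1, 5), // at most 1 QPS with a burst of 5
	})
}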
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events_test
import (
"fmt"
"sync"
"testing"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/flowcontrol"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
schedulingevents "github.com/aws/karpenter-core/pkg/controllers/provisioning/scheduling/events"
terminatorevents "github.com/aws/karpenter-core/pkg/controllers/termination/terminator/events"
"github.com/aws/karpenter-core/pkg/events"
"github.com/aws/karpenter-core/pkg/test"
)
var eventRecorder events.Recorder
var internalRecorder *InternalRecorder
type InternalRecorder struct {
mu sync.RWMutex
calls map[string]int
}
func NewInternalRecorder() *InternalRecorder {
return &InternalRecorder{
calls: map[string]int{},
}
}
func (i *InternalRecorder) Event(_ runtime.Object, _, reason, _ string) {
i.mu.Lock()
defer i.mu.Unlock()
i.calls[reason]++
}
func (i *InternalRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, _ ...interface{}) {
i.Event(object, eventtype, reason, messageFmt)
}
func (i *InternalRecorder) AnnotatedEventf(object runtime.Object, _ map[string]string, eventtype, reason, messageFmt string, _ ...interface{}) {
i.Event(object, eventtype, reason, messageFmt)
}
func (i *InternalRecorder) Calls(reason string) int {
i.mu.RLock()
defer i.mu.RUnlock()
return i.calls[reason]
}
func TestRecorder(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "EventRecorder")
}
var _ = BeforeEach(func() {
internalRecorder = NewInternalRecorder()
eventRecorder = events.NewRecorder(internalRecorder)
schedulingevents.PodNominationRateLimiter = flowcontrol.NewTokenBucketRateLimiter(5, 10)
})
var _ = Describe("Event Creation", func() {
It("should create a NominatePod event", func() {
eventRecorder.Publish(schedulingevents.NominatePod(PodWithUID(), NodeWithUID(), MachineWithUID()))
Expect(internalRecorder.Calls(schedulingevents.NominatePod(PodWithUID(), NodeWithUID(), MachineWithUID()).Reason)).To(Equal(1))
})
It("should create a EvictPod event", func() {
eventRecorder.Publish(terminatorevents.EvictPod(PodWithUID()))
Expect(internalRecorder.Calls(terminatorevents.EvictPod(PodWithUID()).Reason)).To(Equal(1))
})
It("should create a PodFailedToSchedule event", func() {
eventRecorder.Publish(schedulingevents.PodFailedToSchedule(PodWithUID(), fmt.Errorf("")))
Expect(internalRecorder.Calls(schedulingevents.PodFailedToSchedule(PodWithUID(), fmt.Errorf("")).Reason)).To(Equal(1))
})
It("should create a NodeFailedToDrain event", func() {
eventRecorder.Publish(terminatorevents.NodeFailedToDrain(NodeWithUID(), fmt.Errorf("")))
Expect(internalRecorder.Calls(terminatorevents.NodeFailedToDrain(NodeWithUID(), fmt.Errorf("")).Reason)).To(Equal(1))
})
})
var _ = Describe("Dedupe", func() {
It("should only create a single event when many events are created quickly", func() {
pod := PodWithUID()
for i := 0; i < 100; i++ {
eventRecorder.Publish(terminatorevents.EvictPod(pod))
}
Expect(internalRecorder.Calls(terminatorevents.EvictPod(PodWithUID()).Reason)).To(Equal(1))
})
It("should allow the dedupe timeout to be overridden", func() {
pod := PodWithUID()
evt := terminatorevents.EvictPod(pod)
evt.DedupeTimeout = time.Second * 2
// Generate a set of events within the dedupe timeout
for i := 0; i < 10; i++ {
eventRecorder.Publish(evt)
}
Expect(internalRecorder.Calls(terminatorevents.EvictPod(PodWithUID()).Reason)).To(Equal(1))
// Wait until after the overridden dedupe timeout
time.Sleep(time.Second * 3)
eventRecorder.Publish(evt)
Expect(internalRecorder.Calls(terminatorevents.EvictPod(PodWithUID()).Reason)).To(Equal(2))
})
It("should allow events with different entities to be created", func() {
for i := 0; i < 100; i++ {
eventRecorder.Publish(terminatorevents.EvictPod(PodWithUID()))
}
Expect(internalRecorder.Calls(terminatorevents.EvictPod(PodWithUID()).Reason)).To(Equal(100))
})
})
var _ = Describe("Rate Limiting", func() {
It("should only create max-burst when many events are created quickly", func() {
for i := 0; i < 100; i++ {
eventRecorder.Publish(schedulingevents.NominatePod(PodWithUID(), NodeWithUID(), MachineWithUID()))
}
Expect(internalRecorder.Calls(schedulingevents.NominatePod(PodWithUID(), NodeWithUID(), MachineWithUID()).Reason)).To(Equal(10))
})
It("should allow many events over time due to smoothed rate limiting", func() {
for i := 0; i < 3; i++ {
for j := 0; j < 5; j++ {
eventRecorder.Publish(schedulingevents.NominatePod(PodWithUID(), NodeWithUID(), MachineWithUID()))
}
time.Sleep(time.Second)
}
Expect(internalRecorder.Calls(schedulingevents.NominatePod(PodWithUID(), NodeWithUID(), MachineWithUID()).Reason)).To(Equal(15))
})
})
func PodWithUID() *v1.Pod {
p := test.Pod()
p.UID = uuid.NewUUID()
return p
}
func NodeWithUID() *v1.Node {
n := test.Node()
n.UID = uuid.NewUUID()
return n
}
func MachineWithUID() *v1alpha5.Machine {
m := test.Machine()
m.UID = uuid.NewUUID()
return m
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"time"
"github.com/prometheus/client_golang/prometheus"
)
const (
// Common namespace for application metrics.
Namespace = "karpenter"
ProvisionerLabel = "provisioner"
ReasonLabel = "reason"
// Reasons for CREATE/DELETE shared metrics
ConsolidationReason = "consolidation"
ProvisioningReason = "provisioning"
ExpirationReason = "expiration"
EmptinessReason = "emptiness"
DriftReason = "drift"
)
// DurationBuckets returns a []float64 of default threshold values for duration histograms.
// Each returned slice is new and may be modified without impacting other bucket definitions.
func DurationBuckets() []float64 {
// Use same bucket thresholds as controller-runtime.
// https://github.com/kubernetes-sigs/controller-runtime/blob/v0.10.0/pkg/internal/controller/metrics/metrics.go#L47-L48
return []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}
}
// SummaryObjectives returns a map of summary objectives (quantile-error pairs)
func SummaryObjectives() map[float64]float64 {
const epsilon = 0.01
objectives := make(map[float64]float64)
for _, quantile := range []float64{0.0, 0.5, 0.9, 0.99, 1.0} {
objectives[quantile] = epsilon
}
return objectives
}
// Measure returns a deferrable function that observes the duration between the
// defer statement and the end of the function.
func Measure(observer prometheus.Observer) func() {
start := time.Now()
return func() { observer.Observe(time.Since(start).Seconds()) }
}
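// exampleMeasure is an illustrative sketch (not part of the original source): deferring the
// function returned by Measure records the duration of the surrounding call on a histogram
// built with DurationBuckets. The histogram name below is a placeholder.
func exampleMeasure() {
	histogram := prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: Namespace,
		Name:      "example_duration_seconds",
		Help:      "Illustrative duration histogram.",
		Buckets:   DurationBuckets(),
	})
	defer Measure(histogram)() // observes time.Since(start) when exampleMeasure returns
	time.Sleep(10 * time.Millisecond)
}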
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)
const (
nodeSubsystem = "nodes"
machineSubsystem = "machines"
)
var (
MachinesCreatedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: machineSubsystem,
Name: "created",
Help: "Number of machines created in total by Karpenter. Labeled by reason the machine was created and the owning provisioner.",
},
[]string{
ReasonLabel,
ProvisionerLabel,
},
)
MachinesTerminatedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: machineSubsystem,
Name: "terminated",
Help: "Number of machines terminated in total by Karpenter. Labeled by reason the machine was terminated.",
},
[]string{
ReasonLabel,
ProvisionerLabel,
},
)
MachinesLaunchedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: machineSubsystem,
Name: "launched",
Help: "Number of machines launched in total by Karpenter. Labeled by the owning provisioner.",
},
[]string{
ProvisionerLabel,
},
)
MachinesRegisteredCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: machineSubsystem,
Name: "registered",
Help: "Number of machines registered in total by Karpenter. Labeled by the owning provisioner.",
},
[]string{
ProvisionerLabel,
},
)
MachinesInitializedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: machineSubsystem,
Name: "initialized",
Help: "Number of machines initialized in total by Karpenter. Labeled by the owning provisioner.",
},
[]string{
ProvisionerLabel,
},
)
NodesCreatedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: nodeSubsystem,
Name: "created",
Help: "Number of nodes created in total by Karpenter. Labeled by owning provisioner.",
},
[]string{
ProvisionerLabel,
},
)
NodesTerminatedCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: nodeSubsystem,
Name: "terminated",
Help: "Number of nodes terminated in total by Karpenter. Labeled by owning provisioner.",
},
[]string{
ProvisionerLabel,
},
)
)
func init() {
crmetrics.Registry.MustRegister(MachinesCreatedCounter, MachinesTerminatedCounter, MachinesLaunchedCounter,
MachinesRegisteredCounter, MachinesInitializedCounter, NodesCreatedCounter, NodesTerminatedCounter)
}
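// exampleIncrementMachineCreated is an illustrative sketch (not part of the original source):
// the counters above are label-indexed, so callers increment them with a reason and the owning
// provisioner's name. The "default" provisioner name is a placeholder.
func exampleIncrementMachineCreated() {
	MachinesCreatedCounter.With(prometheus.Labels{
		ReasonLabel:      ProvisioningReason,
		ProvisionerLabel: "default",
	}).Inc()
}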
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"log"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"go.uber.org/zap/zapio"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"knative.dev/pkg/configmap/informer"
"knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/logging"
)
// NewLogger returns a configured *zap.SugaredLogger. The logger is
// configured by the `config-logging` ConfigMap, and its level is updated live as that ConfigMap changes.
func NewLogger(ctx context.Context, componentName string, config *rest.Config, cmw *informer.InformedWatcher) *zap.SugaredLogger {
ctx, startInformers := injection.EnableInjectionOrDie(logging.WithLogger(ctx, zap.NewNop().Sugar()), config)
logger, atomicLevel := sharedmain.SetupLoggerOrDie(ctx, componentName)
rest.SetDefaultWarningHandler(&logging.WarningHandler{Logger: logger})
sharedmain.WatchLoggingConfigOrDie(ctx, cmw, logger, atomicLevel, componentName)
startInformers()
return logger
}
// ConfigureGlobalLoggers sets up any package-wide loggers like "log" or "klog" that are utilized by other packages
// to use the configured *zap.SugaredLogger from the context
func ConfigureGlobalLoggers(ctx context.Context) {
klog.SetLogger(zapr.NewLogger(logging.FromContext(ctx).Desugar()))
w := &zapio.Writer{Log: logging.FromContext(ctx).Desugar(), Level: zap.DebugLevel}
log.SetFlags(0)
log.SetOutput(w)
}
type ignoreDebugEventsSink struct {
name string
sink logr.LogSink
}
func (i ignoreDebugEventsSink) Init(ri logr.RuntimeInfo) {
i.sink.Init(ri)
}
func (i ignoreDebugEventsSink) Enabled(level int) bool { return i.sink.Enabled(level) }
func (i ignoreDebugEventsSink) Info(level int, msg string, keysAndValues ...interface{}) {
// ignore debug "events" logs
if level == 1 && i.name == "events" {
return
}
i.sink.Info(level, msg, keysAndValues...)
}
func (i ignoreDebugEventsSink) Error(err error, msg string, keysAndValues ...interface{}) {
i.sink.Error(err, msg, keysAndValues...)
}
func (i ignoreDebugEventsSink) WithValues(keysAndValues ...interface{}) logr.LogSink {
return i.sink.WithValues(keysAndValues...)
}
func (i ignoreDebugEventsSink) WithName(name string) logr.LogSink {
return &ignoreDebugEventsSink{name: name, sink: i.sink.WithName(name)}
}
// ignoreDebugEvents wraps the logger with one that ignores any debug logs coming from a logger named "events". This
// prevents every event we write from creating a debug log which spams the log file during scale-ups due to recording
// pod scheduling decisions as events for visibility.
func ignoreDebugEvents(logger logr.Logger) logr.Logger {
return logr.New(&ignoreDebugEventsSink{sink: logger.GetSink()})
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"context"
"fmt"
"io"
"net/http"
"sync"
"time"
"github.com/go-logr/zapr"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/utils/clock"
"knative.dev/pkg/configmap/informer"
knativeinjection "knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/logging"
"knative.dev/pkg/signals"
"knative.dev/pkg/system"
"knative.dev/pkg/webhook"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/manager"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/events"
corecontroller "github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/injection"
"github.com/aws/karpenter-core/pkg/operator/options"
"github.com/aws/karpenter-core/pkg/operator/scheme"
)
const (
appName = "karpenter"
component = "controller"
)
type Operator struct {
manager.Manager
KubernetesInterface kubernetes.Interface
EventRecorder events.Recorder
Clock clock.Clock
webhooks []knativeinjection.ControllerConstructor
}
// NewOperator instantiates a controller manager or panics
func NewOperator() (context.Context, *Operator) {
// Root Context
ctx := signals.NewContext()
ctx = knativeinjection.WithNamespaceScope(ctx, system.Namespace())
// TODO: This can be removed if we eventually decide that we need leader election. Having leader election has resulted in the webhook
// having issues described in https://github.com/aws/karpenter/issues/2562 so these issues need to be resolved if this line is removed
ctx = sharedmain.WithHADisabled(ctx) // Disable leader election for webhook
// Options
opts := options.New().MustParse()
ctx = injection.WithOptions(ctx, *opts)
// Webhook
ctx = webhook.WithOptions(ctx, webhook.Options{
Port: opts.WebhookPort,
ServiceName: opts.ServiceName,
SecretName: fmt.Sprintf("%s-cert", opts.ServiceName),
GracePeriod: 5 * time.Second,
})
// Client Config
config := controllerruntime.GetConfigOrDie()
config.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(float32(opts.KubeClientQPS), opts.KubeClientBurst)
config.UserAgent = appName
// Client
kubernetesInterface := kubernetes.NewForConfigOrDie(config)
configMapWatcher := informer.NewInformedWatcher(kubernetesInterface, system.Namespace())
lo.Must0(configMapWatcher.Start(ctx.Done()))
// Logging
logger := NewLogger(ctx, component, config, configMapWatcher)
ctx = logging.WithLogger(ctx, logger)
ConfigureGlobalLoggers(ctx)
// Inject settings from the ConfigMap(s) into the context
ctx = injection.WithSettingsOrDie(ctx, kubernetesInterface, apis.Settings...)
// Manager
mgr, err := controllerruntime.NewManager(config, controllerruntime.Options{
Logger: ignoreDebugEvents(zapr.NewLogger(logger.Desugar())),
LeaderElection: opts.EnableLeaderElection,
LeaderElectionID: "karpenter-leader-election",
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
Scheme: scheme.Scheme,
MetricsBindAddress: fmt.Sprintf(":%d", opts.MetricsPort),
HealthProbeBindAddress: fmt.Sprintf(":%d", opts.HealthProbePort),
BaseContext: func() context.Context {
ctx := context.Background()
ctx = logging.WithLogger(ctx, logger)
ctx = injection.WithSettingsOrDie(ctx, kubernetesInterface, apis.Settings...)
ctx = injection.WithConfig(ctx, config)
ctx = injection.WithOptions(ctx, *opts)
return ctx
},
})
mgr = lo.Must(mgr, err, "failed to setup manager")
if opts.EnableProfiling {
registerPprof(mgr)
}
lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Pod{}, "spec.nodeName", func(o client.Object) []string {
return []string{o.(*v1.Pod).Spec.NodeName}
}), "failed to setup pod indexer")
lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1.Node{}, "spec.providerID", func(o client.Object) []string {
return []string{o.(*v1.Node).Spec.ProviderID}
}), "failed to setup node provider id indexer")
lo.Must0(mgr.GetFieldIndexer().IndexField(ctx, &v1alpha5.Machine{}, "status.providerID", func(o client.Object) []string {
return []string{o.(*v1alpha5.Machine).Status.ProviderID}
}), "failed to setup machine provider id indexer")
return ctx, &Operator{
Manager: mgr,
KubernetesInterface: kubernetesInterface,
EventRecorder: events.NewRecorder(mgr.GetEventRecorderFor(appName)),
Clock: clock.RealClock{},
}
}
func (o *Operator) WithControllers(ctx context.Context, controllers ...corecontroller.Controller) *Operator {
for _, c := range controllers {
lo.Must0(c.Builder(ctx, o.Manager).Complete(c), "failed to register controller")
}
lo.Must0(o.Manager.AddHealthzCheck("healthz", healthz.Ping), "failed to setup liveness probe")
lo.Must0(o.Manager.AddReadyzCheck("readyz", healthz.Ping), "failed to setup readiness probe")
return o
}
func (o *Operator) WithWebhooks(webhooks ...knativeinjection.ControllerConstructor) *Operator {
o.webhooks = append(o.webhooks, webhooks...)
lo.Must0(o.Manager.AddReadyzCheck("webhooks", knativeChecker("readiness")))
lo.Must0(o.Manager.AddHealthzCheck("webhooks", knativeChecker("health")))
return o
}
func (o *Operator) Start(ctx context.Context) {
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
lo.Must0(o.Manager.Start(ctx))
}()
if !injection.GetOptions(ctx).DisableWebhook {
wg.Add(1)
go func() {
defer wg.Done()
sharedmain.MainWithConfig(ctx, "webhook", o.GetConfig(), o.webhooks...)
}()
}
wg.Wait()
}
func knativeChecker(path string) healthz.Checker {
return func(req *http.Request) (err error) {
res, err := http.Get(fmt.Sprintf("http://:%d/%s", knativeinjection.HealthCheckDefaultPort, path))
if err != nil {
return err
}
if res.StatusCode != http.StatusOK {
return fmt.Errorf("%s probe failed, %s", path, lo.Must(io.ReadAll(res.Body)))
}
return nil
}
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operator
import (
"net/http"
"net/http/pprof"
"github.com/samber/lo"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
func registerPprof(manager manager.Manager) {
for path, handler := range map[string]http.Handler{
"/debug/pprof/": http.HandlerFunc(pprof.Index),
"/debug/pprof/cmdline": http.HandlerFunc(pprof.Cmdline),
"/debug/pprof/profile": http.HandlerFunc(pprof.Profile),
"/debug/pprof/symbol": http.HandlerFunc(pprof.Symbol),
"/debug/pprof/trace": http.HandlerFunc(pprof.Trace),
"/debug/pprof/allocs": pprof.Handler("allocs"),
"/debug/pprof/heap": pprof.Handler("heap"),
"/debug/pprof/block": pprof.Handler("block"),
"/debug/pprof/goroutine": pprof.Handler("goroutine"),
"/debug/pprof/threadcreate": pprof.Handler("threadcreate"),
} {
lo.Must0(manager.AddMetricsExtraHandler(path, handler), "setting up profiling")
}
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
type Reconciler interface {
reconcile.Reconciler
// Name is the name of the Reconciler for metrics and logging
Name() string
}
// Controller defines a controller that can be registered with controller-runtime
type Controller interface {
Reconciler
// Builder returns a Builder registered with the manager that can be wrapped
// with other Builders and completed later to finish registering the Reconciler with the manager
Builder(context.Context, manager.Manager) Builder
}
// Builder is an interface that, when completed, registers the passed Reconciler with the manager stored
// inside the builder. For a typed reference implementation, see controllerruntime.Builder
type Builder interface {
// Complete builds a builder by registering the Reconciler with the manager
Complete(Reconciler) error
}
// Adapter adapts a controllerruntime.Builder into the Builder interface
type Adapter struct {
builder *controllerruntime.Builder
}
func Adapt(builder *controllerruntime.Builder) Builder {
return &Adapter{
builder: builder,
}
}
func (a *Adapter) Complete(r Reconciler) error {
a.builder = a.builder.Named(r.Name())
return a.builder.Complete(r)
}
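// exampleBuilder is an illustrative sketch (not part of the original source): a Controller's
// Builder method typically wraps a controller-runtime builder with Adapt so that Complete can
// later register the Reconciler under its Name. The watched types are left to the caller.
func exampleBuilder(_ context.Context, m manager.Manager) Builder {
	return Adapt(controllerruntime.NewControllerManagedBy(m))
}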
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/client-go/util/workqueue"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/manager"
crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/metrics"
)
type SingletonBuilder struct {
mgr manager.Manager
}
func NewSingletonManagedBy(m manager.Manager) SingletonBuilder {
return SingletonBuilder{
mgr: m,
}
}
func (b SingletonBuilder) Complete(r Reconciler) error {
return b.mgr.Add(newSingleton(r))
}
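// exampleRegisterSingleton is an illustrative sketch (not part of the original source): a
// Reconciler that should run as a single polling loop (rather than being driven by watch
// events) is registered through NewSingletonManagedBy instead of a controller-runtime builder.
func exampleRegisterSingleton(m manager.Manager, r Reconciler) error {
	return NewSingletonManagedBy(m).Complete(r)
}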
type Singleton struct {
Reconciler
rateLimiter ratelimiter.RateLimiter
}
func newSingleton(r Reconciler) *Singleton {
s := &Singleton{
Reconciler: r,
rateLimiter: workqueue.DefaultItemBasedRateLimiter(),
}
s.initMetrics()
return s
}
// initMetrics is effectively the same metrics initialization function used by controller-runtime
// https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/internal/controller/controller.go
func (s *Singleton) initMetrics() {
activeWorkers.WithLabelValues(s.Name()).Set(0)
reconcileErrors.WithLabelValues(s.Name()).Add(0)
reconcileTotal.WithLabelValues(s.Name(), labelError).Add(0)
reconcileTotal.WithLabelValues(s.Name(), labelRequeueAfter).Add(0)
reconcileTotal.WithLabelValues(s.Name(), labelRequeue).Add(0)
reconcileTotal.WithLabelValues(s.Name(), labelSuccess).Add(0)
workerCount.WithLabelValues(s.Name()).Set(float64(1))
}
var singletonRequest = reconcile.Request{}
func (s *Singleton) Start(ctx context.Context) error {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(s.Name()))
logging.FromContext(ctx).Infof("starting controller")
defer logging.FromContext(ctx).Infof("stopping controller")
for {
select {
case <-time.After(s.reconcile(ctx)):
case <-ctx.Done():
return nil
}
}
}
func (s *Singleton) reconcile(ctx context.Context) time.Duration {
activeWorkers.WithLabelValues(s.Name()).Inc()
defer activeWorkers.WithLabelValues(s.Name()).Dec()
measureDuration := metrics.Measure(reconcileDuration.WithLabelValues(s.Name()))
res, err := s.Reconcile(ctx, singletonRequest)
measureDuration() // Observe the length of time between the function creation and now
switch {
case err != nil:
reconcileErrors.WithLabelValues(s.Name()).Inc()
reconcileTotal.WithLabelValues(s.Name(), labelError).Inc()
logging.FromContext(ctx).Error(err)
return s.rateLimiter.When(singletonRequest)
case res.Requeue:
reconcileTotal.WithLabelValues(s.Name(), labelRequeue).Inc()
return s.rateLimiter.When(singletonRequest)
default:
s.rateLimiter.Forget(singletonRequest)
switch {
case res.RequeueAfter > 0:
reconcileTotal.WithLabelValues(s.Name(), labelRequeueAfter).Inc()
return res.RequeueAfter
default:
reconcileTotal.WithLabelValues(s.Name(), labelSuccess).Inc()
return time.Duration(0)
}
}
}
func (s *Singleton) NeedLeaderElection() bool {
return true
}
func init() {
mergeMetrics()
}
const (
labelError = "error"
labelRequeueAfter = "requeue_after"
labelRequeue = "requeue"
labelSuccess = "success"
)
// Metrics below are copied metrics fired by controller-runtime in its /internal package. This is leveraged
// so that we can fire to the same namespace as users expect other controller-runtime metrics to be fired
// https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/internal/controller/metrics/metrics.go
var (
reconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_total",
Help: "Total number of reconciliations per controller",
}, []string{"controller", "result"})
reconcileDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "controller_runtime_reconcile_time_seconds",
Help: "Length of time per reconciliation per controller",
Buckets: metrics.DurationBuckets(),
}, []string{"controller"})
reconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_errors_total",
Help: "Total number of reconciliation errors per controller",
}, []string{"controller"})
workerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_max_concurrent_reconciles",
Help: "Maximum number of concurrent reconciles per controller",
}, []string{"controller"})
activeWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_active_workers",
Help: "Number of currently used workers per controller",
}, []string{"controller"})
)
// mergeMetrics merges the singleton metrics with metrics already registered in the controller-runtime metrics registry
// https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/internal/controller/metrics/metrics.go
// We know that all of these metrics are already registered by controller-runtime, so we switch over to the existing collectors
func mergeMetrics() {
err := &prometheus.AlreadyRegisteredError{}
errors.As(crmetrics.Registry.Register(reconcileTotal), err)
reconcileTotal = err.ExistingCollector.(*prometheus.CounterVec)
errors.As(crmetrics.Registry.Register(reconcileDuration), err)
reconcileDuration = err.ExistingCollector.(*prometheus.HistogramVec)
errors.As(crmetrics.Registry.Register(reconcileErrors), err)
reconcileErrors = err.ExistingCollector.(*prometheus.CounterVec)
errors.As(crmetrics.Registry.Register(workerCount), err)
workerCount = err.ExistingCollector.(*prometheus.GaugeVec)
errors.As(crmetrics.Registry.Register(activeWorkers), err)
activeWorkers = err.ExistingCollector.(*prometheus.GaugeVec)
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller_test
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/configmap/informer"
"knative.dev/pkg/system"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/apis"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
"github.com/aws/karpenter-core/pkg/operator/controller"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var env *test.Environment
var cmw *informer.InformedWatcher
var defaultConfigMap *v1.ConfigMap
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Controller")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
cmw = informer.NewInformedWatcher(env.KubernetesInterface, system.Namespace())
defaultConfigMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "karpenter-global-settings",
Namespace: system.Namespace(),
},
}
ExpectApplied(ctx, env.Client, defaultConfigMap)
Expect(cmw.Start(env.Done))
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed())
})
var _ = Describe("Typed", func() {
AfterEach(func() {
ExpectCleanedUp(ctx, env.Client)
})
It("should pass in expected node into reconcile", func() {
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: "default",
},
},
})
ExpectApplied(ctx, env.Client, node)
fakeController := &FakeTypedController[*v1.Node]{
ReconcileAssertions: []TypedReconcileAssertion[*v1.Node]{
func(ctx context.Context, n *v1.Node) {
Expect(n.Name).To(Equal(node.Name))
Expect(n.Labels).To(HaveKeyWithValue(v1alpha5.ProvisionerNameLabelKey, "default"))
},
},
}
typedController := controller.Typed[*v1.Node](env.Client, fakeController)
ExpectReconcileSucceeded(ctx, typedController, client.ObjectKeyFromObject(node))
})
It("should call finalizer func when finalizing", func() {
node := test.Node(test.NodeOptions{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1alpha5.ProvisionerNameLabelKey: "default",
},
Finalizers: []string{
v1alpha5.TestingGroup + "/finalizer",
},
},
})
ExpectApplied(ctx, env.Client, node)
Expect(env.Client.Delete(ctx, node)).To(Succeed())
called := false
fakeController := &FakeTypedController[*v1.Node]{
FinalizeAssertions: []TypedReconcileAssertion[*v1.Node]{
func(ctx context.Context, n *v1.Node) {
called = true
},
},
}
typedController := controller.Typed[*v1.Node](env.Client, fakeController)
ExpectReconcileSucceeded(ctx, typedController, client.ObjectKeyFromObject(node))
Expect(called).To(BeTrue())
})
})
type TypedReconcileAssertion[T client.Object] func(context.Context, T)
type FakeTypedController[T client.Object] struct {
ReconcileAssertions []TypedReconcileAssertion[T]
FinalizeAssertions []TypedReconcileAssertion[T]
}
func (c *FakeTypedController[T]) Name() string {
return ""
}
func (c *FakeTypedController[T]) Reconcile(ctx context.Context, obj T) (reconcile.Result, error) {
for _, elem := range c.ReconcileAssertions {
elem(ctx, obj)
}
return reconcile.Result{}, nil
}
func (c *FakeTypedController[T]) Finalize(ctx context.Context, obj T) (reconcile.Result, error) {
for _, elem := range c.FinalizeAssertions {
elem(ctx, obj)
}
return reconcile.Result{}, nil
}
func (c *FakeTypedController[T]) Builder(_ context.Context, _ manager.Manager) controller.Builder {
return nil
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"reflect"
"strings"
"github.com/samber/lo"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/karpenter-core/pkg/operator/injection"
"github.com/aws/karpenter-core/pkg/operator/scheme"
)
type TypedController[T client.Object] interface {
Reconcile(context.Context, T) (reconcile.Result, error)
Name() string
Builder(context.Context, manager.Manager) Builder
}
type FinalizingTypedController[T client.Object] interface {
TypedController[T]
Finalize(context.Context, T) (reconcile.Result, error)
}
type typedDecorator[T client.Object] struct {
kubeClient client.Client
typedController TypedController[T]
}
func Typed[T client.Object](kubeClient client.Client, typedController TypedController[T]) Controller {
return &typedDecorator[T]{
kubeClient: kubeClient,
typedController: typedController,
}
}
func (t *typedDecorator[T]) Name() string {
return t.typedController.Name()
}
func (t *typedDecorator[T]) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
obj := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T) // Create a new pointer to a client.Object
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).
Named(t.typedController.Name()).
With(
strings.ToLower(lo.Must(apiutil.GVKForObject(obj, scheme.Scheme)).Kind),
lo.Ternary(req.NamespacedName.Namespace != "", req.NamespacedName.String(), req.Name),
),
)
ctx = injection.WithControllerName(ctx, t.typedController.Name())
if err := t.kubeClient.Get(ctx, req.NamespacedName, obj); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
finalizingTypedController, ok := t.typedController.(FinalizingTypedController[T])
if !obj.GetDeletionTimestamp().IsZero() && ok {
return finalizingTypedController.Finalize(ctx, obj)
}
return t.typedController.Reconcile(ctx, obj)
}
func (t *typedDecorator[T]) Builder(ctx context.Context, m manager.Manager) Builder {
return t.typedController.Builder(ctx, m)
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package injection
import (
"context"
"fmt"
"time"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"knative.dev/pkg/system"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/operator/options"
)
type optionsKey struct{}
func WithOptions(ctx context.Context, opts options.Options) context.Context {
return context.WithValue(ctx, optionsKey{}, opts)
}
func GetOptions(ctx context.Context) options.Options {
retval := ctx.Value(optionsKey{})
if retval == nil {
return options.Options{}
}
return retval.(options.Options)
}
type configKey struct{}
func WithConfig(ctx context.Context, config *rest.Config) context.Context {
return context.WithValue(ctx, configKey{}, config)
}
func GetConfig(ctx context.Context) *rest.Config {
retval := ctx.Value(configKey{})
if retval == nil {
return nil
}
return retval.(*rest.Config)
}
type controllerNameKeyType struct{}
var controllerNameKey = controllerNameKeyType{}
func WithControllerName(ctx context.Context, name string) context.Context {
return context.WithValue(ctx, controllerNameKey, name)
}
func GetControllerName(ctx context.Context) string {
name := ctx.Value(controllerNameKey)
if name == nil {
return ""
}
return name.(string)
}
// WithSettingsOrDie injects the settings into the context for all configMaps passed through the registrations
// NOTE: Settings are resolved statically into the global context.Context at startup. This was changed from updating
// them dynamically at runtime because dynamic updates would require re-queueing logic to ensure that settings
// such as feature gates are properly reloaded
func WithSettingsOrDie(ctx context.Context, kubernetesInterface kubernetes.Interface, settings ...settings.Injectable) context.Context {
cancelCtx, cancel := context.WithCancel(ctx)
defer cancel()
factory := informers.NewSharedInformerFactoryWithOptions(kubernetesInterface, time.Second*30, informers.WithNamespace(system.Namespace()))
informer := factory.Core().V1().ConfigMaps().Informer()
factory.Start(cancelCtx.Done())
for _, setting := range settings {
cm := lo.Must(waitForConfigMap(ctx, setting.ConfigMap(), informer))
ctx = lo.Must(setting.Inject(ctx, cm))
}
return ctx
}
// waitForConfigMap waits until the named ConfigMap exists in the informer's store and then returns it
func waitForConfigMap(ctx context.Context, name string, informer cache.SharedIndexInformer) (*v1.ConfigMap, error) {
for {
configMap, exists, err := informer.GetStore().GetByKey(types.NamespacedName{Namespace: system.Namespace(), Name: name}.String())
if configMap != nil && exists && err == nil {
return configMap.(*v1.ConfigMap), nil
}
select {
case <-ctx.Done():
return nil, fmt.Errorf("context canceled")
case <-time.After(time.Millisecond * 500):
}
}
}
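// exampleContextRoundTrip is an illustrative sketch (not part of the original source): values
// are threaded through the context with the With* helpers and read back with the Get* helpers;
// GetOptions returns a zero-valued Options when nothing was injected.
func exampleContextRoundTrip(ctx context.Context, opts options.Options) string {
	ctx = WithOptions(ctx, opts)
	ctx = WithControllerName(ctx, "example-controller") // hypothetical controller name
	return GetControllerName(ctx)                       // "example-controller"
}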
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package injection_test
import (
"context"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/system"
"github.com/aws/karpenter-core/pkg/apis/settings"
"github.com/aws/karpenter-core/pkg/operator/injection"
"github.com/aws/karpenter-core/pkg/operator/injection/fake"
"github.com/aws/karpenter-core/pkg/operator/scheme"
"github.com/aws/karpenter-core/pkg/test"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "knative.dev/pkg/logging/testing"
. "github.com/aws/karpenter-core/pkg/test/expectations"
)
var ctx context.Context
var env *test.Environment
var defaultConfigMap *v1.ConfigMap
func TestAPIs(t *testing.T) {
ctx = TestContextWithLogger(t)
RegisterFailHandler(Fail)
RunSpecs(t, "Injection")
}
var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme)
defaultConfigMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "karpenter-global-settings",
Namespace: system.Namespace(),
},
}
ExpectApplied(ctx, env.Client, defaultConfigMap)
})
var _ = AfterSuite(func() {
Expect(env.Stop()).To(Succeed())
})
var _ = BeforeEach(func() {
defaultConfigMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "karpenter-global-settings",
Namespace: system.Namespace(),
},
}
ExpectApplied(ctx, env.Client, defaultConfigMap)
})
var _ = AfterEach(func() {
ExpectDeleted(ctx, env.Client, defaultConfigMap.DeepCopy())
})
var _ = Describe("Settings", func() {
Context("Operator Settings", func() {
It("should have default values", func() {
Eventually(func(g Gomega) {
testCtx := injection.WithSettingsOrDie(ctx, env.KubernetesInterface, &settings.Settings{})
s := settings.FromContext(testCtx)
g.Expect(s.BatchIdleDuration.Duration).To(Equal(1 * time.Second))
g.Expect(s.BatchMaxDuration.Duration).To(Equal(10 * time.Second))
}).Should(Succeed())
})
It("should update if values are changed", func() {
Eventually(func(g Gomega) {
testCtx := injection.WithSettingsOrDie(ctx, env.KubernetesInterface, &settings.Settings{})
s := settings.FromContext(testCtx)
g.Expect(s.BatchIdleDuration.Duration).To(Equal(1 * time.Second))
g.Expect(s.BatchMaxDuration.Duration).To(Equal(10 * time.Second))
})
cm := defaultConfigMap.DeepCopy()
cm.Data = map[string]string{
"batchIdleDuration": "2s",
"batchMaxDuration": "15s",
}
ExpectApplied(ctx, env.Client, cm)
Eventually(func(g Gomega) {
testCtx := injection.WithSettingsOrDie(ctx, env.KubernetesInterface, &settings.Settings{})
s := settings.FromContext(testCtx)
g.Expect(s.BatchIdleDuration.Duration).To(Equal(2 * time.Second))
g.Expect(s.BatchMaxDuration.Duration).To(Equal(15 * time.Second))
}).Should(Succeed())
})
})
Context("Multiple Settings", func() {
It("should get operator settings and features from same configMap", func() {
Eventually(func(g Gomega) {
testCtx := injection.WithSettingsOrDie(ctx, env.KubernetesInterface, &settings.Settings{}, &fake.Settings{})
s := fake.FromContext(testCtx)
g.Expect(s.TestArg).To(Equal("default"))
}).Should(Succeed())
})
It("should get operator settings and features from same configMap", func() {
cm := defaultConfigMap.DeepCopy()
cm.Data = map[string]string{
"batchIdleDuration": "2s",
"batchMaxDuration": "15s",
"testArg": "my-value",
}
ExpectApplied(ctx, env.Client, cm)
Eventually(func(g Gomega) {
testCtx := injection.WithSettingsOrDie(ctx, env.KubernetesInterface, &settings.Settings{}, &fake.Settings{})
s := settings.FromContext(testCtx)
fs := fake.FromContext(testCtx)
g.Expect(s.BatchIdleDuration.Duration).To(Equal(2 * time.Second))
g.Expect(s.BatchMaxDuration.Duration).To(Equal(15 * time.Second))
g.Expect(fs.TestArg).To(Equal("my-value"))
}).Should(Succeed())
})
})
})
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"knative.dev/pkg/configmap"
)
type settingsKeyType struct{}
var ContextKey = settingsKeyType{}
var defaultSettings = &Settings{
TestArg: "default",
}
type Settings struct {
TestArg string `json:"testArg"`
}
func (*Settings) ConfigMap() string {
return "karpenter-global-settings"
}
func (*Settings) Inject(ctx context.Context, cm *v1.ConfigMap) (context.Context, error) {
s := defaultSettings
if err := configmap.Parse(cm.Data,
configmap.AsString("testArg", &s.TestArg),
); err != nil {
return ctx, fmt.Errorf("parsing config data, %w", err)
}
return ToContext(ctx, s), nil
}
func ToContext(ctx context.Context, s *Settings) context.Context {
return context.WithValue(ctx, ContextKey, s)
}
func FromContext(ctx context.Context) *Settings {
data := ctx.Value(ContextKey)
if data == nil {
// This is developer error if this happens, so we should panic
panic("settings doesn't exist in context")
}
return data.(*Settings)
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"errors"
"flag"
"os"
"runtime/debug"
"github.com/aws/karpenter-core/pkg/utils/env"
)
// Options for running this binary
type Options struct {
*flag.FlagSet
// Vendor Neutral
ServiceName string
DisableWebhook bool
WebhookPort int
MetricsPort int
HealthProbePort int
KubeClientQPS int
KubeClientBurst int
EnableProfiling bool
EnableLeaderElection bool
MemoryLimit int64
}
// New creates an Options struct and registers CLI flags and environment variables to fill in its fields
func New() *Options {
opts := &Options{}
f := flag.NewFlagSet("karpenter", flag.ContinueOnError)
opts.FlagSet = f
// Vendor Neutral
f.StringVar(&opts.ServiceName, "karpenter-service", env.WithDefaultString("KARPENTER_SERVICE", ""), "The Karpenter Service name for the dynamic webhook certificate")
f.BoolVar(&opts.DisableWebhook, "disable-webhook", env.WithDefaultBool("DISABLE_WEBHOOK", false), "Disable the admission and validation webhooks")
f.IntVar(&opts.WebhookPort, "webhook-port", env.WithDefaultInt("WEBHOOK_PORT", 8443), "The port the webhook endpoint binds to for validation and mutation of resources")
f.IntVar(&opts.MetricsPort, "metrics-port", env.WithDefaultInt("METRICS_PORT", 8000), "The port the metric endpoint binds to for operating metrics about the controller itself")
f.IntVar(&opts.HealthProbePort, "health-probe-port", env.WithDefaultInt("HEALTH_PROBE_PORT", 8081), "The port the health probe endpoint binds to for reporting controller health")
f.IntVar(&opts.KubeClientQPS, "kube-client-qps", env.WithDefaultInt("KUBE_CLIENT_QPS", 200), "The smoothed rate of qps to kube-apiserver")
f.IntVar(&opts.KubeClientBurst, "kube-client-burst", env.WithDefaultInt("KUBE_CLIENT_BURST", 300), "The maximum allowed burst of queries to the kube-apiserver")
f.BoolVar(&opts.EnableProfiling, "enable-profiling", env.WithDefaultBool("ENABLE_PROFILING", false), "Enable the profiling on the metric endpoint")
f.BoolVar(&opts.EnableLeaderElection, "leader-elect", env.WithDefaultBool("LEADER_ELECT", true), "Start leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.")
f.Int64Var(&opts.MemoryLimit, "memory-limit", env.WithDefaultInt64("MEMORY_LIMIT", -1), "Memory limit on the container running the controller. The GC soft memory limit is set to 90% of this value.")
if opts.MemoryLimit > 0 {
newLimit := int64(float64(opts.MemoryLimit) * 0.9)
debug.SetMemoryLimit(newLimit)
}
return opts
}
// MustParse reads the user-passed flags, environment variables, and default values.
// Options are validated, and MustParse panics if an error is returned
func (o *Options) MustParse() *Options {
err := o.Parse(os.Args[1:])
if errors.Is(err, flag.ErrHelp) {
os.Exit(0)
}
if err != nil {
panic(err)
}
return o
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"github.com/samber/lo"
"github.com/aws/karpenter-core/pkg/apis"
)
var (
Scheme = runtime.NewScheme()
)
func init() {
lo.Must0(clientgoscheme.AddToScheme(Scheme))
lo.Must0(apis.AddToScheme(Scheme))
}
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
"net"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// HostPortUsage tracks HostPort usage within a node. On a node, each <hostIP, hostPort, protocol> used by pods bound
// to the node must be unique. We need to track this to keep an accurate concept of what pods can potentially schedule
// together.
type HostPortUsage struct {
reserved map[types.NamespacedName][]entry
}
type entry struct {
ip net.IP
port int32
protocol v1.Protocol
}
func (e entry) String() string {
return fmt.Sprintf("IP=%s Port=%d Proto=%s", e.ip, e.port, e.protocol)
}
func (e entry) matches(rhs entry) bool {
if e.protocol != rhs.protocol {
return false
}
if e.port != rhs.port {
return false
}
// If the IPs are unequal, they don't match unless one of them is the unspecified IPv4 address "0.0.0.0" or the unspecified IPv6 address "::".
if !e.ip.Equal(rhs.ip) && !e.ip.IsUnspecified() && !rhs.ip.IsUnspecified() {
return false
}
return true
}
func NewHostPortUsage() *HostPortUsage {
return &HostPortUsage{
reserved: map[types.NamespacedName][]entry{},
}
}
// Add registers the pod's host ports with the HostPortUsage, logging an error in the case of a conflict
func (u *HostPortUsage) Add(ctx context.Context, pod *v1.Pod) {
newUsage, err := u.validate(pod)
if err != nil {
logging.FromContext(ctx).Errorf("invariant violated registering host port usage, %s, please file an issue", err)
}
u.reserved[client.ObjectKeyFromObject(pod)] = newUsage
}
// Validate performs host port conflict validation to allow for determining if we can schedule the pod to the node
// before doing so.
func (u *HostPortUsage) Validate(pod *v1.Pod) error {
_, err := u.validate(pod)
return err
}
func (u *HostPortUsage) validate(pod *v1.Pod) ([]entry, error) {
newUsage := getHostPorts(pod)
for _, newEntry := range newUsage {
for podKey, entries := range u.reserved {
for _, existing := range entries {
if newEntry.matches(existing) && podKey != client.ObjectKeyFromObject(pod) {
return nil, fmt.Errorf("%s conflicts with existing HostPort configuration %s", newEntry, existing)
}
}
}
}
return newUsage, nil
}
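// The sketch below is illustrative only and not part of the original file: it shows how
// two pods that both request hostPort 8080/TCP on the default (unspecified) address
// conflict, so Validate rejects the second pod once the first has been added. The pod
// names and port values are hypothetical.
func exampleHostPortConflict() {
	newPod := func(name string, hostPort int32) *v1.Pod {
		pod := &v1.Pod{
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Ports: []v1.ContainerPort{{HostPort: hostPort, Protocol: v1.ProtocolTCP}},
				}},
			},
		}
		pod.Name = name
		pod.Namespace = "default"
		return pod
	}
	usage := NewHostPortUsage()
	usage.Add(context.TODO(), newPod("pod-a", 8080))
	// Validate reports the conflict before committing to scheduling pod-b on this node.
	if err := usage.Validate(newPod("pod-b", 8080)); err != nil {
		fmt.Println("conflict:", err)
	}
}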
// DeletePod removes all host port usage recorded for the pod with the given name.
func (u *HostPortUsage) DeletePod(key types.NamespacedName) {
delete(u.reserved, key)
}
func (u *HostPortUsage) DeepCopy() *HostPortUsage {
if u == nil {
return nil
}
out := &HostPortUsage{}
u.DeepCopyInto(out)
return out
}
func (u *HostPortUsage) DeepCopyInto(out *HostPortUsage) {
out.reserved = map[types.NamespacedName][]entry{}
for k, v := range u.reserved {
for _, e := range v {
out.reserved[k] = append(out.reserved[k], entry{
ip: e.ip,
port: e.port,
protocol: e.protocol,
})
}
}
}
func getHostPorts(pod *v1.Pod) []entry {
var usage []entry
for _, c := range pod.Spec.Containers {
for _, p := range c.Ports {
if p.HostPort == 0 {
continue
}
// Per the K8s docs, "If you don't specify the hostIP and protocol explicitly, Kubernetes will use 0.0.0.0
// as the default hostIP and TCP as the default protocol." In testing, and from reading the code, the protocol
// is defaulted to TCP, but the IP is left empty.
hostIP := p.HostIP
if hostIP == "" {
hostIP = "0.0.0.0"
}
usage = append(usage, entry{
ip: net.ParseIP(hostIP),
port: p.HostPort,
protocol: p.Protocol,
})
}
}
return usage
}
| 145 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"net"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
)
var _ = Describe("HostPortUsage", func() {
Context("entry type UT", func() {
It("String() output", func() {
ipVal := net.IPv4(10, 0, 0, 0)
portVal := int32(4443)
protocolVal := v1.ProtocolTCP
e := entry{
ip: ipVal,
port: portVal,
protocol: protocolVal,
}
Expect(e.String()).To(Equal(fmt.Sprintf("IP=%s Port=%d Proto=%s", ipVal, portVal, protocolVal)))
})
It("identical entries match", func() {
ipVal := net.IPv4(10, 0, 0, 0)
portVal := int32(4443)
protocolVal := v1.ProtocolTCP
e1 := entry{
ip: ipVal,
port: portVal,
protocol: protocolVal,
}
e2 := e1
Expect(e1.matches(e2)).To(BeTrue())
Expect(e2.matches(e1)).To(BeTrue())
})
It("if any one IP has an unspecified IPv4 or IPv6 address, they match", func() {
ipVal := net.IPv4(10, 0, 0, 0)
portVal := int32(4443)
protocolVal := v1.ProtocolTCP
e1 := entry{
ip: ipVal,
port: portVal,
protocol: protocolVal,
}
e2 := entry{
ip: net.IPv4zero,
port: portVal,
protocol: protocolVal,
}
Expect(e1.matches(e2)).To(BeTrue())
Expect(e2.matches(e1)).To(BeTrue())
e2.ip = net.IPv6zero
Expect(e1.matches(e2)).To(BeTrue())
Expect(e2.matches(e1)).To(BeTrue())
})
It("mismatched protocols don't match", func() {
ipVal := net.IPv4(10, 0, 0, 0)
portVal := int32(4443)
protocolVal := v1.ProtocolTCP
e1 := entry{
ip: ipVal,
port: portVal,
protocol: protocolVal,
}
e2 := e1
e2.protocol = v1.ProtocolSCTP
Expect(e1.matches(e2)).To(BeFalse())
Expect(e2.matches(e1)).To(BeFalse())
})
It("mismatched ports don't match", func() {
ipVal := net.IPv4(10, 0, 0, 0)
portVal := int32(4443)
protocolVal := v1.ProtocolTCP
e1 := entry{
ip: ipVal,
port: portVal,
protocol: protocolVal,
}
e2 := e1
e2.port = int32(443)
Expect(e1.matches(e2)).To(BeFalse())
Expect(e2.matches(e1)).To(BeFalse())
})
})
})
| 102 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"math"
"math/rand"
"strconv"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
)
// Requirement is an efficient representation of v1.NodeSelectorRequirement
type Requirement struct {
Key string
complement bool
values sets.String
greaterThan *int
lessThan *int
}
func NewRequirement(key string, operator v1.NodeSelectorOperator, values ...string) *Requirement {
if normalized, ok := v1alpha5.NormalizedLabels[key]; ok {
key = normalized
}
r := &Requirement{
Key: key,
values: sets.NewString(),
complement: true,
}
if operator == v1.NodeSelectorOpIn || operator == v1.NodeSelectorOpDoesNotExist {
r.complement = false
}
if operator == v1.NodeSelectorOpIn || operator == v1.NodeSelectorOpNotIn {
r.values.Insert(values...)
}
if operator == v1.NodeSelectorOpGt {
value, _ := strconv.Atoi(values[0]) // prevalidated
r.greaterThan = &value
}
if operator == v1.NodeSelectorOpLt {
value, _ := strconv.Atoi(values[0]) // prevalidated
r.lessThan = &value
}
return r
}
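// Illustrative sketch (not part of the original file) of the representation built by
// NewRequirement: In/DoesNotExist become concrete value sets, while NotIn/Exists/Gt/Lt
// become complements with optional integer bounds. The "example.com/size" key is
// hypothetical.
func exampleRequirementRepresentation() {
	in := NewRequirement("example.com/size", v1.NodeSelectorOpIn, "small", "large")
	fmt.Println(in.Operator(), in.Values()) // e.g. "In [small large]" (value order not guaranteed)

	gt := NewRequirement("example.com/size", v1.NodeSelectorOpGt, "4")
	// Gt is stored as a complement ("Exists") with a lower bound, so only integer values above 4 are allowed.
	fmt.Println(gt.Operator(), gt.Has("8"), gt.Has("2")) // Exists true false
}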
func (r *Requirement) NodeSelectorRequirement() v1.NodeSelectorRequirement {
switch {
case r.greaterThan != nil:
return v1.NodeSelectorRequirement{
Key: r.Key,
Operator: v1.NodeSelectorOpGt,
Values: []string{strconv.FormatInt(int64(lo.FromPtr(r.greaterThan)), 10)},
}
case r.lessThan != nil:
return v1.NodeSelectorRequirement{
Key: r.Key,
Operator: v1.NodeSelectorOpLt,
Values: []string{strconv.FormatInt(int64(lo.FromPtr(r.lessThan)), 10)},
}
case r.complement:
switch {
case len(r.values) > 0:
return v1.NodeSelectorRequirement{
Key: r.Key,
Operator: v1.NodeSelectorOpNotIn,
Values: r.values.List(),
}
default:
return v1.NodeSelectorRequirement{
Key: r.Key,
Operator: v1.NodeSelectorOpExists,
}
}
default:
switch {
case len(r.values) > 0:
return v1.NodeSelectorRequirement{
Key: r.Key,
Operator: v1.NodeSelectorOpIn,
Values: r.values.List(),
}
default:
return v1.NodeSelectorRequirement{
Key: r.Key,
Operator: v1.NodeSelectorOpDoesNotExist,
}
}
}
}
// Intersection constrains the Requirement by intersecting it with the incoming requirement
// nolint:gocyclo
func (r *Requirement) Intersection(requirement *Requirement) *Requirement {
// Complement
complement := r.complement && requirement.complement
// Boundaries
greaterThan := maxIntPtr(r.greaterThan, requirement.greaterThan)
lessThan := minIntPtr(r.lessThan, requirement.lessThan)
if greaterThan != nil && lessThan != nil && *greaterThan >= *lessThan {
return NewRequirement(r.Key, v1.NodeSelectorOpDoesNotExist)
}
// Values
var values sets.String
if r.complement && requirement.complement {
values = r.values.Union(requirement.values)
} else if r.complement && !requirement.complement {
values = requirement.values.Difference(r.values)
} else if !r.complement && requirement.complement {
values = r.values.Difference(requirement.values)
} else {
values = r.values.Intersection(requirement.values)
}
for value := range values {
if !withinIntPtrs(value, greaterThan, lessThan) {
values.Delete(value)
}
}
// Remove boundaries for concrete sets
if !complement {
greaterThan, lessThan = nil, nil
}
return &Requirement{Key: r.Key, values: values, complement: complement, greaterThan: greaterThan, lessThan: lessThan}
}
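// Illustrative sketch (not part of the original file) of Intersection: intersecting
// In[A,B] with NotIn[A] narrows the requirement to In[B], and disjoint value sets
// collapse to DoesNotExist. The "example.com/zone" key is hypothetical.
func exampleRequirementIntersection() {
	inAB := NewRequirement("example.com/zone", v1.NodeSelectorOpIn, "A", "B")
	notInA := NewRequirement("example.com/zone", v1.NodeSelectorOpNotIn, "A")
	fmt.Println(inAB.Intersection(notInA)) // example.com/zone In [B]

	inC := NewRequirement("example.com/zone", v1.NodeSelectorOpIn, "C")
	fmt.Println(inAB.Intersection(inC)) // example.com/zone DoesNotExist
}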
func (r *Requirement) Any() string {
switch r.Operator() {
case v1.NodeSelectorOpIn:
return r.values.UnsortedList()[0]
case v1.NodeSelectorOpNotIn, v1.NodeSelectorOpExists:
min := 0
max := math.MaxInt64
if r.greaterThan != nil {
min = *r.greaterThan + 1
}
if r.lessThan != nil {
max = *r.lessThan
}
return fmt.Sprint(rand.Intn(max-min) + min) //nolint:gosec
}
return ""
}
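// Illustrative sketch (not part of the original file) of Any: for complement requirements
// with Gt/Lt bounds, Any returns an arbitrary integer inside the open interval, which is
// how Requirements.Labels picks a concrete value. The "example.com/weight" key is hypothetical.
func exampleRequirementAny() {
	between := NewRequirement("example.com/weight", v1.NodeSelectorOpGt, "1").
		Intersection(NewRequirement("example.com/weight", v1.NodeSelectorOpLt, "9"))
	fmt.Println(between.Any()) // some value in the range 2..8
}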
// Has returns true if the requirement allows the value
func (r *Requirement) Has(value string) bool {
if r.complement {
return !r.values.Has(value) && withinIntPtrs(value, r.greaterThan, r.lessThan)
}
return r.values.Has(value) && withinIntPtrs(value, r.greaterThan, r.lessThan)
}
func (r *Requirement) Values() []string {
return r.values.UnsortedList()
}
func (r *Requirement) Insert(items ...string) {
r.values.Insert(items...)
}
func (r *Requirement) Operator() v1.NodeSelectorOperator {
if r.complement {
if r.Len() < math.MaxInt64 {
return v1.NodeSelectorOpNotIn
}
return v1.NodeSelectorOpExists // v1.NodeSelectorOpGt and v1.NodeSelectorOpLt are treated as "Exists" with bounds
}
if r.Len() > 0 {
return v1.NodeSelectorOpIn
}
return v1.NodeSelectorOpDoesNotExist
}
func (r *Requirement) Len() int {
if r.complement {
return math.MaxInt64 - r.values.Len()
}
return r.values.Len()
}
func (r *Requirement) String() string {
var s string
switch r.Operator() {
case v1.NodeSelectorOpExists, v1.NodeSelectorOpDoesNotExist:
s = fmt.Sprintf("%s %s", r.Key, r.Operator())
default:
values := r.values.List()
if length := len(values); length > 5 {
values = append(values[:5], fmt.Sprintf("and %d others", length-5))
}
s = fmt.Sprintf("%s %s %s", r.Key, r.Operator(), values)
}
if r.greaterThan != nil {
s += fmt.Sprintf(" >%d", *r.greaterThan)
}
if r.lessThan != nil {
s += fmt.Sprintf(" <%d", *r.lessThan)
}
return s
}
func withinIntPtrs(valueAsString string, greaterThan, lessThan *int) bool {
if greaterThan == nil && lessThan == nil {
return true
}
// If bounds are set, non-integer values are invalid
value, err := strconv.Atoi(valueAsString)
if err != nil {
return false
}
if greaterThan != nil && *greaterThan >= value {
return false
}
if lessThan != nil && *lessThan <= value {
return false
}
return true
}
func minIntPtr(a, b *int) *int {
if a == nil {
return b
}
if b == nil {
return a
}
if *a < *b {
return a
}
return b
}
func maxIntPtr(a, b *int) *int {
if a == nil {
return b
}
if b == nil {
return a
}
if *a > *b {
return a
}
return b
}
| 265 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"sort"
"strings"
"github.com/samber/lo"
"go.uber.org/multierr"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/aws/karpenter-core/pkg/apis/v1alpha5"
)
// Requirements is an efficient set representation under the hood. Since its underlying
// types are slices and maps, this type should not be used as a pointer.
type Requirements map[string]*Requirement
func NewRequirements(requirements ...*Requirement) Requirements {
r := Requirements{}
for _, requirement := range requirements {
r.Add(requirement)
}
return r
}
// NewNodeSelectorRequirements constructs requirements from NodeSelectorRequirements
func NewNodeSelectorRequirements(requirements ...v1.NodeSelectorRequirement) Requirements {
r := NewRequirements()
for _, requirement := range requirements {
r.Add(NewRequirement(requirement.Key, requirement.Operator, requirement.Values...))
}
return r
}
// NewLabelRequirements constructs requirements from labels
func NewLabelRequirements(labels map[string]string) Requirements {
requirements := NewRequirements()
for key, value := range labels {
requirements.Add(NewRequirement(key, v1.NodeSelectorOpIn, value))
}
return requirements
}
// NewPodRequirements constructs requirements from a pod
func NewPodRequirements(pod *v1.Pod) Requirements {
requirements := NewLabelRequirements(pod.Spec.NodeSelector)
if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil {
return requirements
}
// The legal operators for pod affinity and anti-affinity are In, NotIn, Exists, DoesNotExist.
// Select heaviest preference and treat as a requirement. An outer loop will iteratively unconstrain them if unsatisfiable.
if preferred := pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution; len(preferred) > 0 {
sort.Slice(preferred, func(i int, j int) bool { return preferred[i].Weight > preferred[j].Weight })
requirements.Add(NewNodeSelectorRequirements(preferred[0].Preference.MatchExpressions...).Values()...)
}
// Select first requirement. An outer loop will iteratively remove OR requirements if unsatisfiable
if pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
len(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 {
requirements.Add(NewNodeSelectorRequirements(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions...).Values()...)
}
return requirements
}
func (r Requirements) NodeSelectorRequirements() []v1.NodeSelectorRequirement {
return lo.Map(lo.Values(r), func(req *Requirement, _ int) v1.NodeSelectorRequirement {
return req.NodeSelectorRequirement()
})
}
// Add adds the provided requirements to the receiver, intersecting any keys that already exist. Mutates the existing requirements.
func (r Requirements) Add(requirements ...*Requirement) {
for _, requirement := range requirements {
if existing, ok := r[requirement.Key]; ok {
requirement = requirement.Intersection(existing)
}
r[requirement.Key] = requirement
}
}
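// Illustrative sketch (not part of the original file) of Add: adding a requirement for a
// key that already exists intersects the two, so a Requirements set only ever tightens.
// The "example.com/zone" key is hypothetical.
func exampleRequirementsAdd() {
	reqs := NewRequirements(NewRequirement("example.com/zone", v1.NodeSelectorOpIn, "A", "B"))
	reqs.Add(NewRequirement("example.com/zone", v1.NodeSelectorOpNotIn, "A"))
	fmt.Println(reqs.Get("example.com/zone")) // example.com/zone In [B]
}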
// Keys returns unique set of the label keys from the requirements
func (r Requirements) Keys() sets.String {
keys := sets.NewString()
for key := range r {
keys.Insert(key)
}
return keys
}
func (r Requirements) Values() []*Requirement {
return lo.Values(r)
}
func (r Requirements) Has(key string) bool {
_, ok := r[key]
return ok
}
func (r Requirements) Get(key string) *Requirement {
if _, ok := r[key]; !ok {
// If not defined, allow any values with the exists operator
return NewRequirement(key, v1.NodeSelectorOpExists)
}
return r[key]
}
// Compatible ensures the provided requirements can be met.
func (r Requirements) Compatible(requirements Requirements) (errs error) {
// Custom Labels must intersect, but if not defined are denied.
for key := range requirements.Keys().Difference(v1alpha5.WellKnownLabels) {
if operator := requirements.Get(key).Operator(); r.Has(key) || operator == v1.NodeSelectorOpNotIn || operator == v1.NodeSelectorOpDoesNotExist {
continue
}
errs = multierr.Append(errs, fmt.Errorf("label %q does not have known values%s", key, labelHint(r, key)))
}
// Well Known Labels must intersect, but if not defined, are allowed.
return multierr.Append(errs, r.Intersects(requirements))
}
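// Illustrative sketch (not part of the original file) of Compatible: a requirement on an
// unknown custom label is rejected when the receiver defines no values for it, while
// well-known labels only need to intersect. The "example.com/team" label is hypothetical.
func exampleRequirementsCompatible() {
	provisioner := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "zone-a"))

	pod := NewRequirements(NewRequirement("example.com/team", v1.NodeSelectorOpExists))
	fmt.Println(provisioner.Compatible(pod)) // label "example.com/team" does not have known values

	pod = NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "zone-a", "zone-b"))
	fmt.Println(provisioner.Compatible(pod)) // <nil>
}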
// editDistance is an implementation of edit distance from Algorithms/DPV
func editDistance(s, t string) int {
min := func(a, b, c int) int {
m := a
if b < m {
m = b
}
if c < m {
m = c
}
return m
}
m := len(s)
n := len(t)
if m == 0 {
return n
}
if n == 0 {
return m
}
prevRow := make([]int, n)
curRow := make([]int, n)
for j := 1; j < n; j++ {
prevRow[j] = j
}
for i := 1; i < m; i++ {
for j := 1; j < n; j++ {
diff := 0
if s[i] != t[j] {
diff = 1
}
curRow[j] = min(prevRow[j]+1, curRow[j-1]+1, prevRow[j-1]+diff)
}
prevRow, curRow = curRow, prevRow
}
return prevRow[n-1]
}
func labelHint(r Requirements, key string) string {
for wellKnown := range v1alpha5.WellKnownLabels {
if strings.Contains(wellKnown, key) || editDistance(key, wellKnown) < len(wellKnown)/5 {
return fmt.Sprintf(" (typo of %q?)", wellKnown)
}
}
for existing := range r {
if strings.Contains(existing, key) || editDistance(key, existing) < len(existing)/5 {
return fmt.Sprintf(" (typo of %q?)", existing)
}
}
return ""
}
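// Illustrative sketch (not part of the original file) of the typo-hint heuristic:
// labelHint suggests a well-known label when the unknown key is a substring of it or
// within a small edit distance of it.
func exampleLabelHint() {
	fmt.Println(labelHint(NewRequirements(), "zone")) // (typo of "topology.kubernetes.io/zone"?)
	// The misspelled key is within a small edit distance of the well-known label (the
	// hint threshold in labelHint is len(label)/5).
	fmt.Println(editDistance("topology.kubernetesio/zone", "topology.kubernetes.io/zone"))
}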
// Intersects returns an error if the requirements don't have overlapping values; undefined keys are allowed.
func (r Requirements) Intersects(requirements Requirements) (errs error) {
for key := range r.Keys().Intersection(requirements.Keys()) {
existing := r.Get(key)
incoming := requirements.Get(key)
// There must be some value, except
if existing.Intersection(incoming).Len() == 0 {
// where the incoming requirement has operator { NotIn, DoesNotExist }
if operator := incoming.Operator(); operator == v1.NodeSelectorOpNotIn || operator == v1.NodeSelectorOpDoesNotExist {
// and the existing requirement has operator { NotIn, DoesNotExist }
if operator := existing.Operator(); operator == v1.NodeSelectorOpNotIn || operator == v1.NodeSelectorOpDoesNotExist {
continue
}
}
errs = multierr.Append(errs, fmt.Errorf("key %s, %s not in %s", key, incoming, existing))
}
}
return errs
}
func (r Requirements) Labels() map[string]string {
labels := map[string]string{}
for key, requirement := range r {
if !v1alpha5.IsRestrictedNodeLabel(key) {
if value := requirement.Any(); value != "" {
labels[key] = value
}
}
}
return labels
}
func (r Requirements) String() string {
requirements := lo.Reject(r.Values(), func(requirement *Requirement, _ int) bool { return v1alpha5.RestrictedLabels.Has(requirement.Key) })
stringRequirements := lo.Map(requirements, func(requirement *Requirement, _ int) string { return requirement.String() })
sort.Strings(stringRequirements)
return strings.Join(stringRequirements, ", ")
}
| 226 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
)
var _ = Describe("Requirements", func() {
Context("Compatibility", func() {
It("should normalize aliased labels", func() {
requirements := NewRequirements(NewRequirement(v1.LabelFailureDomainBetaZone, v1.NodeSelectorOpIn, "test"))
Expect(requirements.Has(v1.LabelFailureDomainBetaZone)).To(BeFalse())
Expect(requirements.Get(v1.LabelTopologyZone).Has("test")).To(BeTrue())
})
// Use a well-known label like zone, because it behaves differently from custom labels
unconstrained := NewRequirements()
exists := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpExists))
doesNotExist := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpDoesNotExist))
inA := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "A"))
inB := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "B"))
inAB := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "A", "B"))
notInA := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpNotIn, "A"))
in1 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "1"))
in9 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "9"))
in19 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, "1", "9"))
notIn12 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpNotIn, "1", "2"))
greaterThan1 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpGt, "1"))
greaterThan9 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpGt, "9"))
lessThan1 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpLt, "1"))
lessThan9 := NewRequirements(NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpLt, "9"))
It("should be compatible", func() {
Expect(unconstrained.Compatible(unconstrained)).To(Succeed())
Expect(unconstrained.Compatible(exists)).To(Succeed())
Expect(unconstrained.Compatible(doesNotExist)).To(Succeed())
Expect(unconstrained.Compatible(inA)).To(Succeed())
Expect(unconstrained.Compatible(inB)).To(Succeed())
Expect(unconstrained.Compatible(inAB)).To(Succeed())
Expect(unconstrained.Compatible(notInA)).To(Succeed())
Expect(unconstrained.Compatible(in1)).To(Succeed())
Expect(unconstrained.Compatible(in9)).To(Succeed())
Expect(unconstrained.Compatible(in19)).To(Succeed())
Expect(unconstrained.Compatible(notIn12)).To(Succeed())
Expect(unconstrained.Compatible(greaterThan1)).To(Succeed())
Expect(unconstrained.Compatible(greaterThan9)).To(Succeed())
Expect(unconstrained.Compatible(lessThan1)).To(Succeed())
Expect(unconstrained.Compatible(lessThan9)).To(Succeed())
Expect(exists.Compatible(unconstrained)).To(Succeed())
Expect(exists.Compatible(exists)).To(Succeed())
Expect(exists.Compatible(doesNotExist)).ToNot(Succeed())
Expect(exists.Compatible(inA)).To(Succeed())
Expect(exists.Compatible(inB)).To(Succeed())
Expect(exists.Compatible(inAB)).To(Succeed())
Expect(exists.Compatible(notInA)).To(Succeed())
Expect(exists.Compatible(in1)).To(Succeed())
Expect(exists.Compatible(in9)).To(Succeed())
Expect(exists.Compatible(in19)).To(Succeed())
Expect(exists.Compatible(notIn12)).To(Succeed())
Expect(exists.Compatible(greaterThan1)).To(Succeed())
Expect(exists.Compatible(greaterThan9)).To(Succeed())
Expect(exists.Compatible(lessThan1)).To(Succeed())
Expect(exists.Compatible(lessThan9)).To(Succeed())
Expect(doesNotExist.Compatible(unconstrained)).To(Succeed())
Expect(doesNotExist.Compatible(exists)).ToNot(Succeed())
Expect(doesNotExist.Compatible(doesNotExist)).To(Succeed())
Expect(doesNotExist.Compatible(inA)).ToNot(Succeed())
Expect(doesNotExist.Compatible(inB)).ToNot(Succeed())
Expect(doesNotExist.Compatible(inAB)).ToNot(Succeed())
Expect(doesNotExist.Compatible(notInA)).To(Succeed())
Expect(doesNotExist.Compatible(in1)).ToNot(Succeed())
Expect(doesNotExist.Compatible(in9)).ToNot(Succeed())
Expect(doesNotExist.Compatible(in19)).ToNot(Succeed())
Expect(doesNotExist.Compatible(notIn12)).To(Succeed())
Expect(doesNotExist.Compatible(greaterThan1)).ToNot(Succeed())
Expect(doesNotExist.Compatible(greaterThan9)).ToNot(Succeed())
Expect(doesNotExist.Compatible(lessThan1)).ToNot(Succeed())
Expect(doesNotExist.Compatible(lessThan9)).ToNot(Succeed())
Expect(inA.Compatible(unconstrained)).To(Succeed())
Expect(inA.Compatible(exists)).To(Succeed())
Expect(inA.Compatible(doesNotExist)).ToNot(Succeed())
Expect(inA.Compatible(inA)).To(Succeed())
Expect(inA.Compatible(inB)).ToNot(Succeed())
Expect(inA.Compatible(inAB)).To(Succeed())
Expect(inA.Compatible(notInA)).ToNot(Succeed())
Expect(inA.Compatible(in1)).ToNot(Succeed())
Expect(inA.Compatible(in9)).ToNot(Succeed())
Expect(inA.Compatible(in19)).ToNot(Succeed())
Expect(inA.Compatible(notIn12)).To(Succeed())
Expect(inA.Compatible(greaterThan1)).ToNot(Succeed())
Expect(inA.Compatible(greaterThan9)).ToNot(Succeed())
Expect(inA.Compatible(lessThan1)).ToNot(Succeed())
Expect(inA.Compatible(lessThan9)).ToNot(Succeed())
Expect(inB.Compatible(unconstrained)).To(Succeed())
Expect(inB.Compatible(exists)).To(Succeed())
Expect(inB.Compatible(doesNotExist)).ToNot(Succeed())
Expect(inB.Compatible(inA)).ToNot(Succeed())
Expect(inB.Compatible(inB)).To(Succeed())
Expect(inB.Compatible(inAB)).To(Succeed())
Expect(inB.Compatible(notInA)).To(Succeed())
Expect(inB.Compatible(in1)).ToNot(Succeed())
Expect(inB.Compatible(in9)).ToNot(Succeed())
Expect(inB.Compatible(in19)).ToNot(Succeed())
Expect(inB.Compatible(notIn12)).To(Succeed())
Expect(inB.Compatible(greaterThan1)).ToNot(Succeed())
Expect(inB.Compatible(greaterThan9)).ToNot(Succeed())
Expect(inB.Compatible(lessThan1)).ToNot(Succeed())
Expect(inB.Compatible(lessThan9)).ToNot(Succeed())
Expect(inAB.Compatible(unconstrained)).To(Succeed())
Expect(inAB.Compatible(exists)).To(Succeed())
Expect(inAB.Compatible(doesNotExist)).ToNot(Succeed())
Expect(inAB.Compatible(inA)).To(Succeed())
Expect(inAB.Compatible(inB)).To(Succeed())
Expect(inAB.Compatible(inAB)).To(Succeed())
Expect(inAB.Compatible(notInA)).To(Succeed())
Expect(inAB.Compatible(in1)).ToNot(Succeed())
Expect(inAB.Compatible(in9)).ToNot(Succeed())
Expect(inAB.Compatible(in19)).ToNot(Succeed())
Expect(inAB.Compatible(notIn12)).To(Succeed())
Expect(inAB.Compatible(greaterThan1)).ToNot(Succeed())
Expect(inAB.Compatible(greaterThan9)).ToNot(Succeed())
Expect(inAB.Compatible(lessThan1)).ToNot(Succeed())
Expect(inAB.Compatible(lessThan9)).ToNot(Succeed())
Expect(notInA.Compatible(unconstrained)).To(Succeed())
Expect(notInA.Compatible(exists)).To(Succeed())
Expect(notInA.Compatible(doesNotExist)).To(Succeed())
Expect(notInA.Compatible(inA)).ToNot(Succeed())
Expect(notInA.Compatible(inB)).To(Succeed())
Expect(notInA.Compatible(inAB)).To(Succeed())
Expect(notInA.Compatible(notInA)).To(Succeed())
Expect(notInA.Compatible(in1)).To(Succeed())
Expect(notInA.Compatible(in9)).To(Succeed())
Expect(notInA.Compatible(in19)).To(Succeed())
Expect(notInA.Compatible(notIn12)).To(Succeed())
Expect(notInA.Compatible(greaterThan1)).To(Succeed())
Expect(notInA.Compatible(greaterThan9)).To(Succeed())
Expect(notInA.Compatible(lessThan1)).To(Succeed())
Expect(notInA.Compatible(lessThan9)).To(Succeed())
Expect(in1.Compatible(unconstrained)).To(Succeed())
Expect(in1.Compatible(exists)).To(Succeed())
Expect(in1.Compatible(doesNotExist)).ToNot(Succeed())
Expect(in1.Compatible(inA)).ToNot(Succeed())
Expect(in1.Compatible(inB)).ToNot(Succeed())
Expect(in1.Compatible(inAB)).ToNot(Succeed())
Expect(in1.Compatible(notInA)).To(Succeed())
Expect(in1.Compatible(in1)).To(Succeed())
Expect(in1.Compatible(in9)).ToNot(Succeed())
Expect(in1.Compatible(in19)).To(Succeed())
Expect(in1.Compatible(notIn12)).ToNot(Succeed())
Expect(in1.Compatible(greaterThan1)).ToNot(Succeed())
Expect(in1.Compatible(greaterThan9)).ToNot(Succeed())
Expect(in1.Compatible(lessThan1)).ToNot(Succeed())
Expect(in1.Compatible(lessThan9)).To(Succeed())
Expect(in9.Compatible(unconstrained)).To(Succeed())
Expect(in9.Compatible(exists)).To(Succeed())
Expect(in9.Compatible(doesNotExist)).ToNot(Succeed())
Expect(in9.Compatible(inA)).ToNot(Succeed())
Expect(in9.Compatible(inB)).ToNot(Succeed())
Expect(in9.Compatible(inAB)).ToNot(Succeed())
Expect(in9.Compatible(notInA)).To(Succeed())
Expect(in9.Compatible(in1)).ToNot(Succeed())
Expect(in9.Compatible(in9)).To(Succeed())
Expect(in9.Compatible(in19)).To(Succeed())
Expect(in9.Compatible(notIn12)).To(Succeed())
Expect(in9.Compatible(greaterThan1)).To(Succeed())
Expect(in9.Compatible(greaterThan9)).ToNot(Succeed())
Expect(in9.Compatible(lessThan1)).ToNot(Succeed())
Expect(in9.Compatible(lessThan9)).ToNot(Succeed())
Expect(in19.Compatible(unconstrained)).To(Succeed())
Expect(in19.Compatible(exists)).To(Succeed())
Expect(in19.Compatible(doesNotExist)).ToNot(Succeed())
Expect(in19.Compatible(inA)).ToNot(Succeed())
Expect(in19.Compatible(inB)).ToNot(Succeed())
Expect(in19.Compatible(inAB)).ToNot(Succeed())
Expect(in19.Compatible(notInA)).To(Succeed())
Expect(in19.Compatible(in1)).To(Succeed())
Expect(in19.Compatible(in9)).To(Succeed())
Expect(in19.Compatible(in19)).To(Succeed())
Expect(in19.Compatible(notIn12)).To(Succeed())
Expect(in19.Compatible(greaterThan1)).To(Succeed())
Expect(in19.Compatible(greaterThan9)).ToNot(Succeed())
Expect(in19.Compatible(lessThan1)).ToNot(Succeed())
Expect(in19.Compatible(lessThan9)).To(Succeed())
Expect(notIn12.Compatible(unconstrained)).To(Succeed())
Expect(notIn12.Compatible(exists)).To(Succeed())
Expect(notIn12.Compatible(doesNotExist)).To(Succeed())
Expect(notIn12.Compatible(inA)).To(Succeed())
Expect(notIn12.Compatible(inB)).To(Succeed())
Expect(notIn12.Compatible(inAB)).To(Succeed())
Expect(notIn12.Compatible(notInA)).To(Succeed())
Expect(notIn12.Compatible(in1)).ToNot(Succeed())
Expect(notIn12.Compatible(in9)).To(Succeed())
Expect(notIn12.Compatible(in19)).To(Succeed())
Expect(notIn12.Compatible(notIn12)).To(Succeed())
Expect(notIn12.Compatible(greaterThan1)).To(Succeed())
Expect(notIn12.Compatible(greaterThan9)).To(Succeed())
Expect(notIn12.Compatible(lessThan1)).To(Succeed())
Expect(notIn12.Compatible(lessThan9)).To(Succeed())
Expect(greaterThan1.Compatible(unconstrained)).To(Succeed())
Expect(greaterThan1.Compatible(exists)).To(Succeed())
Expect(greaterThan1.Compatible(doesNotExist)).ToNot(Succeed())
Expect(greaterThan1.Compatible(inA)).ToNot(Succeed())
Expect(greaterThan1.Compatible(inB)).ToNot(Succeed())
Expect(greaterThan1.Compatible(inAB)).ToNot(Succeed())
Expect(greaterThan1.Compatible(notInA)).To(Succeed())
Expect(greaterThan1.Compatible(in1)).ToNot(Succeed())
Expect(greaterThan1.Compatible(in9)).To(Succeed())
Expect(greaterThan1.Compatible(in19)).To(Succeed())
Expect(greaterThan1.Compatible(notIn12)).To(Succeed())
Expect(greaterThan1.Compatible(greaterThan1)).To(Succeed())
Expect(greaterThan1.Compatible(greaterThan9)).To(Succeed())
Expect(greaterThan1.Compatible(lessThan1)).ToNot(Succeed())
Expect(greaterThan1.Compatible(lessThan9)).To(Succeed())
Expect(greaterThan9.Compatible(unconstrained)).To(Succeed())
Expect(greaterThan9.Compatible(exists)).To(Succeed())
Expect(greaterThan9.Compatible(doesNotExist)).ToNot(Succeed())
Expect(greaterThan9.Compatible(inA)).ToNot(Succeed())
Expect(greaterThan9.Compatible(inB)).ToNot(Succeed())
Expect(greaterThan9.Compatible(inAB)).ToNot(Succeed())
Expect(greaterThan9.Compatible(notInA)).To(Succeed())
Expect(greaterThan9.Compatible(in1)).ToNot(Succeed())
Expect(greaterThan9.Compatible(in9)).ToNot(Succeed())
Expect(greaterThan9.Compatible(in19)).ToNot(Succeed())
Expect(greaterThan9.Compatible(notIn12)).To(Succeed())
Expect(greaterThan9.Compatible(greaterThan1)).To(Succeed())
Expect(greaterThan9.Compatible(greaterThan9)).To(Succeed())
Expect(greaterThan9.Compatible(lessThan1)).ToNot(Succeed())
Expect(greaterThan9.Compatible(lessThan9)).ToNot(Succeed())
Expect(lessThan1.Compatible(unconstrained)).To(Succeed())
Expect(lessThan1.Compatible(exists)).To(Succeed())
Expect(lessThan1.Compatible(doesNotExist)).ToNot(Succeed())
Expect(lessThan1.Compatible(inA)).ToNot(Succeed())
Expect(lessThan1.Compatible(inB)).ToNot(Succeed())
Expect(lessThan1.Compatible(inAB)).ToNot(Succeed())
Expect(lessThan1.Compatible(notInA)).To(Succeed())
Expect(lessThan1.Compatible(in1)).ToNot(Succeed())
Expect(lessThan1.Compatible(in9)).ToNot(Succeed())
Expect(lessThan1.Compatible(in19)).ToNot(Succeed())
Expect(lessThan1.Compatible(notIn12)).To(Succeed())
Expect(lessThan1.Compatible(greaterThan1)).ToNot(Succeed())
Expect(lessThan1.Compatible(greaterThan9)).ToNot(Succeed())
Expect(lessThan1.Compatible(lessThan1)).To(Succeed())
Expect(lessThan1.Compatible(lessThan9)).To(Succeed())
Expect(lessThan9.Compatible(unconstrained)).To(Succeed())
Expect(lessThan9.Compatible(exists)).To(Succeed())
Expect(lessThan9.Compatible(doesNotExist)).ToNot(Succeed())
Expect(lessThan9.Compatible(inA)).ToNot(Succeed())
Expect(lessThan9.Compatible(inB)).ToNot(Succeed())
Expect(lessThan9.Compatible(inAB)).ToNot(Succeed())
Expect(lessThan9.Compatible(notInA)).To(Succeed())
Expect(lessThan9.Compatible(in1)).To(Succeed())
Expect(lessThan9.Compatible(in9)).ToNot(Succeed())
Expect(lessThan9.Compatible(in19)).To(Succeed())
Expect(lessThan9.Compatible(notIn12)).To(Succeed())
Expect(lessThan9.Compatible(greaterThan1)).To(Succeed())
Expect(lessThan9.Compatible(greaterThan9)).ToNot(Succeed())
Expect(lessThan9.Compatible(lessThan1)).To(Succeed())
Expect(lessThan9.Compatible(lessThan9)).To(Succeed())
})
})
Context("Error Messages", func() {
It("should detect well known label truncations", func() {
unconstrained := NewRequirements()
for _, tc := range []struct {
badLabel string
expectedError string
}{
{
badLabel: "zone",
expectedError: `label "zone" does not have known values (typo of "topology.kubernetes.io/zone"?)`,
},
{
badLabel: "region",
expectedError: `label "region" does not have known values (typo of "topology.kubernetes.io/region"?)`,
},
{
badLabel: "provisioner-name",
expectedError: `label "provisioner-name" does not have known values (typo of "karpenter.sh/provisioner-name"?)`,
},
{
badLabel: "instance-type",
expectedError: `label "instance-type" does not have known values (typo of "node.kubernetes.io/instance-type"?)`,
},
{
badLabel: "arch",
expectedError: `label "arch" does not have known values (typo of "kubernetes.io/arch"?)`,
},
{
badLabel: "capacity-type",
expectedError: `label "capacity-type" does not have known values (typo of "karpenter.sh/capacity-type"?)`,
},
} {
provisionerRequirement := NewRequirements(NewRequirement(tc.badLabel, v1.NodeSelectorOpExists))
Expect(unconstrained.Compatible(provisionerRequirement).Error()).To(Equal(tc.expectedError))
}
})
It("should detect well known label typos", func() {
unconstrained := NewRequirements()
for _, tc := range []struct {
badLabel string
expectedError string
}{
{
badLabel: "topology.kubernetesio/zone",
expectedError: `label "topology.kubernetesio/zone" does not have known values (typo of "topology.kubernetes.io/zone"?)`,
},
{
badLabel: "topology.kubernetes.io/regio",
expectedError: `label "topology.kubernetes.io/regio" does not have known values (typo of "topology.kubernetes.io/region"?)`,
},
{
badLabel: "karpenterprovisioner-name",
expectedError: `label "karpenterprovisioner-name" does not have known values (typo of "karpenter.sh/provisioner-name"?)`,
},
} {
provisionerRequirement := NewRequirements(NewRequirement(tc.badLabel, v1.NodeSelectorOpExists))
Expect(unconstrained.Compatible(provisionerRequirement).Error()).To(Equal(tc.expectedError))
}
})
It("should display an error message for unknown labels", func() {
unconstrained := NewRequirements()
provisionerRequirement := NewRequirements(NewRequirement("deployment", v1.NodeSelectorOpExists))
Expect(unconstrained.Compatible(provisionerRequirement).Error()).To(Equal(`label "deployment" does not have known values`))
})
})
Context("NodeSelectorRequirements Conversion", func() {
It("should convert combinations of labels to expected NodeSelectorRequirements", func() {
exists := NewRequirement("exists", v1.NodeSelectorOpExists)
doesNotExist := NewRequirement("doesNotExist", v1.NodeSelectorOpDoesNotExist)
inA := NewRequirement("inA", v1.NodeSelectorOpIn, "A")
inB := NewRequirement("inB", v1.NodeSelectorOpIn, "B")
inAB := NewRequirement("inAB", v1.NodeSelectorOpIn, "A", "B")
notInA := NewRequirement("notInA", v1.NodeSelectorOpNotIn, "A")
in1 := NewRequirement("in1", v1.NodeSelectorOpIn, "1")
in9 := NewRequirement("in9", v1.NodeSelectorOpIn, "9")
in19 := NewRequirement("in19", v1.NodeSelectorOpIn, "1", "9")
notIn12 := NewRequirement("notIn12", v1.NodeSelectorOpNotIn, "1", "2")
greaterThan1 := NewRequirement("greaterThan1", v1.NodeSelectorOpGt, "1")
greaterThan9 := NewRequirement("greaterThan9", v1.NodeSelectorOpGt, "9")
lessThan1 := NewRequirement("lessThan1", v1.NodeSelectorOpLt, "1")
lessThan9 := NewRequirement("lessThan9", v1.NodeSelectorOpLt, "9")
reqs := NewRequirements(
exists,
doesNotExist,
inA,
inB,
inAB,
notInA,
in1,
in9,
in19,
notIn12,
greaterThan1,
greaterThan9,
lessThan1,
lessThan9,
)
Expect(reqs.NodeSelectorRequirements()).To(ContainElements(
v1.NodeSelectorRequirement{Key: "exists", Operator: v1.NodeSelectorOpExists},
v1.NodeSelectorRequirement{Key: "doesNotExist", Operator: v1.NodeSelectorOpDoesNotExist},
v1.NodeSelectorRequirement{Key: "inA", Operator: v1.NodeSelectorOpIn, Values: []string{"A"}},
v1.NodeSelectorRequirement{Key: "inB", Operator: v1.NodeSelectorOpIn, Values: []string{"B"}},
v1.NodeSelectorRequirement{Key: "inAB", Operator: v1.NodeSelectorOpIn, Values: []string{"A", "B"}},
v1.NodeSelectorRequirement{Key: "notInA", Operator: v1.NodeSelectorOpNotIn, Values: []string{"A"}},
v1.NodeSelectorRequirement{Key: "in1", Operator: v1.NodeSelectorOpIn, Values: []string{"1"}},
v1.NodeSelectorRequirement{Key: "in9", Operator: v1.NodeSelectorOpIn, Values: []string{"9"}},
v1.NodeSelectorRequirement{Key: "in19", Operator: v1.NodeSelectorOpIn, Values: []string{"1", "9"}},
v1.NodeSelectorRequirement{Key: "notIn12", Operator: v1.NodeSelectorOpNotIn, Values: []string{"1", "2"}},
v1.NodeSelectorRequirement{Key: "greaterThan1", Operator: v1.NodeSelectorOpGt, Values: []string{"1"}},
v1.NodeSelectorRequirement{Key: "greaterThan9", Operator: v1.NodeSelectorOpGt, Values: []string{"9"}},
v1.NodeSelectorRequirement{Key: "lessThan1", Operator: v1.NodeSelectorOpLt, Values: []string{"1"}},
v1.NodeSelectorRequirement{Key: "lessThan9", Operator: v1.NodeSelectorOpLt, Values: []string{"9"}},
))
Expect(reqs.NodeSelectorRequirements()).To(HaveLen(14))
})
})
Context("Stringify Requirements", func() {
It("should print Requirements in the same order", func() {
reqs := NewRequirements(
NewRequirement("exists", v1.NodeSelectorOpExists),
NewRequirement("doesNotExist", v1.NodeSelectorOpDoesNotExist),
NewRequirement("inA", v1.NodeSelectorOpIn, "A"),
NewRequirement("inB", v1.NodeSelectorOpIn, "B"),
NewRequirement("inAB", v1.NodeSelectorOpIn, "A", "B"),
NewRequirement("notInA", v1.NodeSelectorOpNotIn, "A"),
NewRequirement("in1", v1.NodeSelectorOpIn, "1"),
NewRequirement("in9", v1.NodeSelectorOpIn, "9"),
NewRequirement("in19", v1.NodeSelectorOpIn, "1", "9"),
NewRequirement("notIn12", v1.NodeSelectorOpNotIn, "1", "2"),
NewRequirement("greaterThan1", v1.NodeSelectorOpGt, "1"),
NewRequirement("greaterThan9", v1.NodeSelectorOpGt, "9"),
NewRequirement("lessThan1", v1.NodeSelectorOpLt, "1"),
NewRequirement("lessThan9", v1.NodeSelectorOpLt, "9"),
)
Expect(reqs.String()).To(Equal("doesNotExist DoesNotExist, exists Exists, greaterThan1 Exists >1, greaterThan9 Exists >9, in1 In [1], in19 In [1 9], in9 In [9], inA In [A], inAB In [A B], inB In [B], lessThan1 Exists <1, lessThan9 Exists <9, notIn12 NotIn [1 2], notInA NotIn [A]"))
})
})
})
// Keeping this in case we need it; I ran it for 1M+ samples and had no issues
// fuzz: elapsed: 2m27s, execs: 1002748 (6130/sec), new interesting: 30 (total: 33)
func FuzzEditDistance(f *testing.F) {
f.Add("foo", "bar")
f.Add("foo", "")
f.Add("", "foo")
f.Fuzz(func(t *testing.T, lhs, rhs string) {
editDistance(lhs, rhs)
})
}
| 443 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"math"
"strconv"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
var _ = Describe("Requirement", func() {
exists := NewRequirement("key", v1.NodeSelectorOpExists)
doesNotExist := NewRequirement("key", v1.NodeSelectorOpDoesNotExist)
inA := NewRequirement("key", v1.NodeSelectorOpIn, "A")
inB := NewRequirement("key", v1.NodeSelectorOpIn, "B")
inAB := NewRequirement("key", v1.NodeSelectorOpIn, "A", "B")
notInA := NewRequirement("key", v1.NodeSelectorOpNotIn, "A")
in1 := NewRequirement("key", v1.NodeSelectorOpIn, "1")
in9 := NewRequirement("key", v1.NodeSelectorOpIn, "9")
in19 := NewRequirement("key", v1.NodeSelectorOpIn, "1", "9")
notIn12 := NewRequirement("key", v1.NodeSelectorOpNotIn, "1", "2")
greaterThan1 := NewRequirement("key", v1.NodeSelectorOpGt, "1")
greaterThan9 := NewRequirement("key", v1.NodeSelectorOpGt, "9")
lessThan1 := NewRequirement("key", v1.NodeSelectorOpLt, "1")
lessThan9 := NewRequirement("key", v1.NodeSelectorOpLt, "9")
Context("NewRequirements", func() {
It("should normalize labels", func() {
nodeSelector := map[string]string{
v1.LabelFailureDomainBetaZone: "test",
v1.LabelFailureDomainBetaRegion: "test",
"beta.kubernetes.io/arch": "test",
"beta.kubernetes.io/os": "test",
v1.LabelInstanceType: "test",
}
requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
})
for _, r := range []Requirements{
NewLabelRequirements(nodeSelector),
NewNodeSelectorRequirements(requirements...),
NewPodRequirements(&v1.Pod{
Spec: v1.PodSpec{
NodeSelector: nodeSelector,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: requirements}}},
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{{Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: requirements}}},
},
},
},
}),
} {
Expect(r.Keys().List()).To(ConsistOf(
v1.LabelArchStable,
v1.LabelOSStable,
v1.LabelInstanceTypeStable,
v1.LabelTopologyRegion,
v1.LabelTopologyZone,
))
}
})
})
Context("Intersection", func() {
It("should intersect sets", func() {
Expect(exists.Intersection(exists)).To(Equal(exists))
Expect(exists.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(exists.Intersection(inA)).To(Equal(inA))
Expect(exists.Intersection(inB)).To(Equal(inB))
Expect(exists.Intersection(inAB)).To(Equal(inAB))
Expect(exists.Intersection(notInA)).To(Equal(notInA))
Expect(exists.Intersection(in1)).To(Equal(in1))
Expect(exists.Intersection(in9)).To(Equal(in9))
Expect(exists.Intersection(in19)).To(Equal(in19))
Expect(exists.Intersection(notIn12)).To(Equal(notIn12))
Expect(exists.Intersection(greaterThan1)).To(Equal(greaterThan1))
Expect(exists.Intersection(greaterThan9)).To(Equal(greaterThan9))
Expect(exists.Intersection(lessThan1)).To(Equal(lessThan1))
Expect(exists.Intersection(lessThan9)).To(Equal(lessThan9))
Expect(doesNotExist.Intersection(exists)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(inA)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(inB)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(inAB)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(notInA)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(in1)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(in9)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(in19)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(notIn12)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(greaterThan1)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(doesNotExist.Intersection(lessThan9)).To(Equal(doesNotExist))
Expect(inA.Intersection(exists)).To(Equal(inA))
Expect(inA.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(inA.Intersection(inA)).To(Equal(inA))
Expect(inA.Intersection(inB)).To(Equal(doesNotExist))
Expect(inA.Intersection(inAB)).To(Equal(inA))
Expect(inA.Intersection(notInA)).To(Equal(doesNotExist))
Expect(inA.Intersection(in1)).To(Equal(doesNotExist))
Expect(inA.Intersection(in9)).To(Equal(doesNotExist))
Expect(inA.Intersection(in19)).To(Equal(doesNotExist))
Expect(inA.Intersection(notIn12)).To(Equal(inA))
Expect(inA.Intersection(greaterThan1)).To(Equal(doesNotExist))
Expect(inA.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(inA.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(inA.Intersection(lessThan9)).To(Equal(doesNotExist))
Expect(inB.Intersection(exists)).To(Equal(inB))
Expect(inB.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(inB.Intersection(inA)).To(Equal(doesNotExist))
Expect(inB.Intersection(inB)).To(Equal(inB))
Expect(inB.Intersection(inAB)).To(Equal(inB))
Expect(inB.Intersection(notInA)).To(Equal(inB))
Expect(inB.Intersection(in1)).To(Equal(doesNotExist))
Expect(inB.Intersection(in9)).To(Equal(doesNotExist))
Expect(inB.Intersection(in19)).To(Equal(doesNotExist))
Expect(inB.Intersection(notIn12)).To(Equal(inB))
Expect(inB.Intersection(greaterThan1)).To(Equal(doesNotExist))
Expect(inB.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(inB.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(inB.Intersection(lessThan9)).To(Equal(doesNotExist))
Expect(inAB.Intersection(exists)).To(Equal(inAB))
Expect(inAB.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(inAB.Intersection(inA)).To(Equal(inA))
Expect(inAB.Intersection(inB)).To(Equal(inB))
Expect(inAB.Intersection(inAB)).To(Equal(inAB))
Expect(inAB.Intersection(notInA)).To(Equal(inB))
Expect(inAB.Intersection(in1)).To(Equal(doesNotExist))
Expect(inAB.Intersection(in9)).To(Equal(doesNotExist))
Expect(inAB.Intersection(in19)).To(Equal(doesNotExist))
Expect(inAB.Intersection(notIn12)).To(Equal(inAB))
Expect(inAB.Intersection(greaterThan1)).To(Equal(doesNotExist))
Expect(inAB.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(inAB.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(inAB.Intersection(lessThan9)).To(Equal(doesNotExist))
Expect(notInA.Intersection(exists)).To(Equal(notInA))
Expect(notInA.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(notInA.Intersection(inA)).To(Equal(doesNotExist))
Expect(notInA.Intersection(inB)).To(Equal(inB))
Expect(notInA.Intersection(inAB)).To(Equal(inB))
Expect(notInA.Intersection(notInA)).To(Equal(notInA))
Expect(notInA.Intersection(in1)).To(Equal(in1))
Expect(notInA.Intersection(in9)).To(Equal(in9))
Expect(notInA.Intersection(in19)).To(Equal(in19))
Expect(notInA.Intersection(notIn12)).To(Equal(&Requirement{Key: "key", complement: true, values: sets.NewString("A", "1", "2")}))
Expect(notInA.Intersection(greaterThan1)).To(Equal(greaterThan1))
Expect(notInA.Intersection(greaterThan9)).To(Equal(greaterThan9))
Expect(notInA.Intersection(lessThan1)).To(Equal(lessThan1))
Expect(notInA.Intersection(lessThan9)).To(Equal(lessThan9))
Expect(in1.Intersection(exists)).To(Equal(in1))
Expect(in1.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(in1.Intersection(inA)).To(Equal(doesNotExist))
Expect(in1.Intersection(inB)).To(Equal(doesNotExist))
Expect(in1.Intersection(inAB)).To(Equal(doesNotExist))
Expect(in1.Intersection(notInA)).To(Equal(in1))
Expect(in1.Intersection(in1)).To(Equal(in1))
Expect(in1.Intersection(in9)).To(Equal(doesNotExist))
Expect(in1.Intersection(in19)).To(Equal(in1))
Expect(in1.Intersection(notIn12)).To(Equal(doesNotExist))
Expect(in1.Intersection(greaterThan1)).To(Equal(doesNotExist))
Expect(in1.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(in1.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(in1.Intersection(lessThan9)).To(Equal(in1))
Expect(in9.Intersection(exists)).To(Equal(in9))
Expect(in9.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(in9.Intersection(inA)).To(Equal(doesNotExist))
Expect(in9.Intersection(inB)).To(Equal(doesNotExist))
Expect(in9.Intersection(inAB)).To(Equal(doesNotExist))
Expect(in9.Intersection(notInA)).To(Equal(in9))
Expect(in9.Intersection(in1)).To(Equal(doesNotExist))
Expect(in9.Intersection(in9)).To(Equal(in9))
Expect(in9.Intersection(in19)).To(Equal(in9))
Expect(in9.Intersection(notIn12)).To(Equal(in9))
Expect(in9.Intersection(greaterThan1)).To(Equal(in9))
Expect(in9.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(in9.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(in9.Intersection(lessThan9)).To(Equal(doesNotExist))
Expect(in19.Intersection(exists)).To(Equal(in19))
Expect(in19.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(in19.Intersection(inA)).To(Equal(doesNotExist))
Expect(in19.Intersection(inB)).To(Equal(doesNotExist))
Expect(in19.Intersection(inAB)).To(Equal(doesNotExist))
Expect(in19.Intersection(notInA)).To(Equal(in19))
Expect(in19.Intersection(in1)).To(Equal(in1))
Expect(in19.Intersection(in9)).To(Equal(in9))
Expect(in19.Intersection(in19)).To(Equal(in19))
Expect(in19.Intersection(notIn12)).To(Equal(in9))
Expect(in19.Intersection(greaterThan1)).To(Equal(in9))
Expect(in19.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(in19.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(in19.Intersection(lessThan9)).To(Equal(in1))
Expect(notIn12.Intersection(exists)).To(Equal(notIn12))
Expect(notIn12.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(notIn12.Intersection(inA)).To(Equal(inA))
Expect(notIn12.Intersection(inB)).To(Equal(inB))
Expect(notIn12.Intersection(inAB)).To(Equal(inAB))
Expect(notIn12.Intersection(notInA)).To(Equal(&Requirement{Key: "key", complement: true, values: sets.NewString("A", "1", "2")}))
Expect(notIn12.Intersection(in1)).To(Equal(doesNotExist))
Expect(notIn12.Intersection(in9)).To(Equal(in9))
Expect(notIn12.Intersection(in19)).To(Equal(in9))
Expect(notIn12.Intersection(notIn12)).To(Equal(notIn12))
Expect(notIn12.Intersection(greaterThan1)).To(Equal(&Requirement{Key: "key", complement: true, greaterThan: greaterThan1.greaterThan, values: sets.NewString("2")}))
Expect(notIn12.Intersection(greaterThan9)).To(Equal(&Requirement{Key: "key", complement: true, greaterThan: greaterThan9.greaterThan, values: sets.NewString()}))
Expect(notIn12.Intersection(lessThan1)).To(Equal(&Requirement{Key: "key", complement: true, lessThan: lessThan1.lessThan, values: sets.NewString()}))
Expect(notIn12.Intersection(lessThan9)).To(Equal(&Requirement{Key: "key", complement: true, lessThan: lessThan9.lessThan, values: sets.NewString("1", "2")}))
Expect(greaterThan1.Intersection(exists)).To(Equal(greaterThan1))
Expect(greaterThan1.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(greaterThan1.Intersection(inA)).To(Equal(doesNotExist))
Expect(greaterThan1.Intersection(inB)).To(Equal(doesNotExist))
Expect(greaterThan1.Intersection(inAB)).To(Equal(doesNotExist))
Expect(greaterThan1.Intersection(notInA)).To(Equal(greaterThan1))
Expect(greaterThan1.Intersection(in1)).To(Equal(doesNotExist))
Expect(greaterThan1.Intersection(in9)).To(Equal(in9))
Expect(greaterThan1.Intersection(in19)).To(Equal(in9))
Expect(greaterThan1.Intersection(notIn12)).To(Equal(&Requirement{Key: "key", complement: true, greaterThan: greaterThan1.greaterThan, values: sets.NewString("2")}))
Expect(greaterThan1.Intersection(greaterThan1)).To(Equal(greaterThan1))
Expect(greaterThan1.Intersection(greaterThan9)).To(Equal(greaterThan9))
Expect(greaterThan1.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(greaterThan1.Intersection(lessThan9)).To(Equal(&Requirement{Key: "key", complement: true, greaterThan: greaterThan1.greaterThan, lessThan: lessThan9.lessThan, values: sets.NewString()}))
Expect(greaterThan9.Intersection(exists)).To(Equal(greaterThan9))
Expect(greaterThan9.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(inA)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(inB)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(inAB)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(notInA)).To(Equal(greaterThan9))
Expect(greaterThan9.Intersection(in1)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(in9)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(in19)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(notIn12)).To(Equal(greaterThan9))
Expect(greaterThan9.Intersection(greaterThan1)).To(Equal(greaterThan9))
Expect(greaterThan9.Intersection(greaterThan9)).To(Equal(greaterThan9))
Expect(greaterThan9.Intersection(lessThan1)).To(Equal(doesNotExist))
Expect(greaterThan9.Intersection(lessThan9)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(exists)).To(Equal(lessThan1))
Expect(lessThan1.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(inA)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(inB)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(inAB)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(notInA)).To(Equal(lessThan1))
Expect(lessThan1.Intersection(in1)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(in9)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(in19)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(notIn12)).To(Equal(lessThan1))
Expect(lessThan1.Intersection(greaterThan1)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(lessThan1.Intersection(lessThan1)).To(Equal(lessThan1))
Expect(lessThan1.Intersection(lessThan9)).To(Equal(lessThan1))
Expect(lessThan9.Intersection(exists)).To(Equal(lessThan9))
Expect(lessThan9.Intersection(doesNotExist)).To(Equal(doesNotExist))
Expect(lessThan9.Intersection(inA)).To(Equal(doesNotExist))
Expect(lessThan9.Intersection(inB)).To(Equal(doesNotExist))
Expect(lessThan9.Intersection(inAB)).To(Equal(doesNotExist))
Expect(lessThan9.Intersection(notInA)).To(Equal(lessThan9))
Expect(lessThan9.Intersection(in1)).To(Equal(in1))
Expect(lessThan9.Intersection(in9)).To(Equal(doesNotExist))
Expect(lessThan9.Intersection(in19)).To(Equal(in1))
Expect(lessThan9.Intersection(notIn12)).To(Equal(&Requirement{Key: "key", complement: true, lessThan: lessThan9.lessThan, values: sets.NewString("1", "2")}))
Expect(lessThan9.Intersection(greaterThan1)).To(Equal(&Requirement{Key: "key", complement: true, greaterThan: greaterThan1.greaterThan, lessThan: lessThan9.lessThan, values: sets.NewString()}))
Expect(lessThan9.Intersection(greaterThan9)).To(Equal(doesNotExist))
Expect(lessThan9.Intersection(lessThan1)).To(Equal(lessThan1))
Expect(lessThan9.Intersection(lessThan9)).To(Equal(lessThan9))
})
})
Context("Has", func() {
It("should have the right values", func() {
Expect(exists.Has("A")).To(BeTrue())
Expect(doesNotExist.Has("A")).To(BeFalse())
Expect(inA.Has("A")).To(BeTrue())
Expect(inB.Has("A")).To(BeFalse())
Expect(inAB.Has("A")).To(BeTrue())
Expect(notInA.Has("A")).To(BeFalse())
Expect(in1.Has("A")).To(BeFalse())
Expect(in9.Has("A")).To(BeFalse())
Expect(in19.Has("A")).To(BeFalse())
Expect(notIn12.Has("A")).To(BeTrue())
Expect(greaterThan1.Has("A")).To(BeFalse())
Expect(greaterThan9.Has("A")).To(BeFalse())
Expect(lessThan1.Has("A")).To(BeFalse())
Expect(lessThan9.Has("A")).To(BeFalse())
Expect(exists.Has("B")).To(BeTrue())
Expect(doesNotExist.Has("B")).To(BeFalse())
Expect(inA.Has("B")).To(BeFalse())
Expect(inB.Has("B")).To(BeTrue())
Expect(inAB.Has("B")).To(BeTrue())
Expect(notInA.Has("B")).To(BeTrue())
Expect(in1.Has("B")).To(BeFalse())
Expect(in9.Has("B")).To(BeFalse())
Expect(in19.Has("B")).To(BeFalse())
Expect(notIn12.Has("B")).To(BeTrue())
Expect(greaterThan1.Has("B")).To(BeFalse())
Expect(greaterThan9.Has("B")).To(BeFalse())
Expect(lessThan1.Has("B")).To(BeFalse())
Expect(lessThan9.Has("B")).To(BeFalse())
Expect(exists.Has("1")).To(BeTrue())
Expect(doesNotExist.Has("1")).To(BeFalse())
Expect(inA.Has("1")).To(BeFalse())
Expect(inB.Has("1")).To(BeFalse())
Expect(inAB.Has("1")).To(BeFalse())
Expect(notInA.Has("1")).To(BeTrue())
Expect(in1.Has("1")).To(BeTrue())
Expect(in9.Has("1")).To(BeFalse())
Expect(in19.Has("1")).To(BeTrue())
Expect(notIn12.Has("1")).To(BeFalse())
Expect(greaterThan1.Has("1")).To(BeFalse())
Expect(greaterThan9.Has("1")).To(BeFalse())
Expect(lessThan1.Has("1")).To(BeFalse())
Expect(lessThan9.Has("1")).To(BeTrue())
Expect(exists.Has("2")).To(BeTrue())
Expect(doesNotExist.Has("2")).To(BeFalse())
Expect(inA.Has("2")).To(BeFalse())
Expect(inB.Has("2")).To(BeFalse())
Expect(inAB.Has("2")).To(BeFalse())
Expect(notInA.Has("2")).To(BeTrue())
Expect(in1.Has("2")).To(BeFalse())
Expect(in9.Has("2")).To(BeFalse())
Expect(in19.Has("2")).To(BeFalse())
Expect(notIn12.Has("2")).To(BeFalse())
Expect(greaterThan1.Has("2")).To(BeTrue())
Expect(greaterThan9.Has("2")).To(BeFalse())
Expect(lessThan1.Has("2")).To(BeFalse())
Expect(lessThan9.Has("2")).To(BeTrue())
Expect(exists.Has("9")).To(BeTrue())
Expect(doesNotExist.Has("9")).To(BeFalse())
Expect(inA.Has("9")).To(BeFalse())
Expect(inB.Has("9")).To(BeFalse())
Expect(inAB.Has("9")).To(BeFalse())
Expect(notInA.Has("9")).To(BeTrue())
Expect(in1.Has("9")).To(BeFalse())
Expect(in9.Has("9")).To(BeTrue())
Expect(in19.Has("9")).To(BeTrue())
Expect(notIn12.Has("9")).To(BeTrue())
Expect(greaterThan1.Has("9")).To(BeTrue())
Expect(greaterThan9.Has("9")).To(BeFalse())
Expect(lessThan1.Has("9")).To(BeFalse())
Expect(lessThan9.Has("9")).To(BeFalse())
})
})
Context("Operator", func() {
It("should return the right operator", func() {
Expect(exists.Operator()).To(Equal(v1.NodeSelectorOpExists))
Expect(doesNotExist.Operator()).To(Equal(v1.NodeSelectorOpDoesNotExist))
Expect(inA.Operator()).To(Equal(v1.NodeSelectorOpIn))
Expect(inB.Operator()).To(Equal(v1.NodeSelectorOpIn))
Expect(inAB.Operator()).To(Equal(v1.NodeSelectorOpIn))
Expect(notInA.Operator()).To(Equal(v1.NodeSelectorOpNotIn))
Expect(in1.Operator()).To(Equal(v1.NodeSelectorOpIn))
Expect(in9.Operator()).To(Equal(v1.NodeSelectorOpIn))
Expect(in19.Operator()).To(Equal(v1.NodeSelectorOpIn))
Expect(notIn12.Operator()).To(Equal(v1.NodeSelectorOpNotIn))
Expect(greaterThan1.Operator()).To(Equal(v1.NodeSelectorOpExists))
Expect(greaterThan9.Operator()).To(Equal(v1.NodeSelectorOpExists))
Expect(lessThan1.Operator()).To(Equal(v1.NodeSelectorOpExists))
Expect(lessThan9.Operator()).To(Equal(v1.NodeSelectorOpExists))
})
})
Context("Len", func() {
It("should have the correct length", func() {
Expect(exists.Len()).To(Equal(math.MaxInt64))
Expect(doesNotExist.Len()).To(Equal(0))
Expect(inA.Len()).To(Equal(1))
Expect(inB.Len()).To(Equal(1))
Expect(inAB.Len()).To(Equal(2))
Expect(notInA.Len()).To(Equal(math.MaxInt64 - 1))
Expect(in1.Len()).To(Equal(1))
Expect(in9.Len()).To(Equal(1))
Expect(in19.Len()).To(Equal(2))
Expect(notIn12.Len()).To(Equal(math.MaxInt64 - 2))
Expect(greaterThan1.Len()).To(Equal(math.MaxInt64))
Expect(greaterThan9.Len()).To(Equal(math.MaxInt64))
Expect(lessThan1.Len()).To(Equal(math.MaxInt64))
Expect(lessThan9.Len()).To(Equal(math.MaxInt64))
})
})
Context("Any", func() {
It("should return any", func() {
Expect(exists.Any()).ToNot(BeEmpty())
Expect(doesNotExist.Any()).To(BeEmpty())
Expect(inA.Any()).To(Equal("A"))
Expect(inB.Any()).To(Equal("B"))
Expect(inAB.Any()).To(Or(Equal("A"), Equal("B")))
Expect(notInA.Any()).ToNot(Or(BeEmpty(), Equal("A")))
Expect(in1.Any()).To(Equal("1"))
Expect(in9.Any()).To(Equal("9"))
Expect(in19.Any()).To(Or(Equal("1"), Equal("9")))
Expect(notIn12.Any()).ToNot(Or(BeEmpty(), Equal("1"), Equal("2")))
Expect(strconv.Atoi(greaterThan1.Any())).To(BeNumerically(">=", 1))
Expect(strconv.Atoi(greaterThan9.Any())).To(And(BeNumerically(">=", 9), BeNumerically("<", math.MaxInt64)))
Expect(lessThan1.Any()).To(Equal("0"))
Expect(strconv.Atoi(lessThan9.Any())).To(And(BeNumerically(">=", 0), BeNumerically("<", 9)))
})
})
Context("String", func() {
It("should print the right string", func() {
Expect(exists.String()).To(Equal("key Exists"))
Expect(doesNotExist.String()).To(Equal("key DoesNotExist"))
Expect(inA.String()).To(Equal("key In [A]"))
Expect(inB.String()).To(Equal("key In [B]"))
Expect(inAB.String()).To(Equal("key In [A B]"))
Expect(notInA.String()).To(Equal("key NotIn [A]"))
Expect(in1.String()).To(Equal("key In [1]"))
Expect(in9.String()).To(Equal("key In [9]"))
Expect(in19.String()).To(Equal("key In [1 9]"))
Expect(notIn12.String()).To(Equal("key NotIn [1 2]"))
Expect(greaterThan1.String()).To(Equal("key Exists >1"))
Expect(greaterThan9.String()).To(Equal("key Exists >9"))
Expect(lessThan1.String()).To(Equal("key Exists <1"))
Expect(lessThan9.String()).To(Equal("key Exists <9"))
Expect(greaterThan1.Intersection(lessThan9).String()).To(Equal("key Exists >1 <9"))
Expect(greaterThan9.Intersection(lessThan1).String()).To(Equal("key DoesNotExist"))
})
})
Context("NodeSelectorRequirements Conversion", func() {
It("should return the expected NodeSelectorRequirement", func() {
Expect(exists.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpExists}))
Expect(doesNotExist.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpDoesNotExist}))
Expect(inA.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpIn, Values: []string{"A"}}))
Expect(inB.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpIn, Values: []string{"B"}}))
Expect(inAB.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpIn, Values: []string{"A", "B"}}))
Expect(notInA.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"A"}}))
Expect(in1.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpIn, Values: []string{"1"}}))
Expect(in9.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpIn, Values: []string{"9"}}))
Expect(in19.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpIn, Values: []string{"1", "9"}}))
Expect(notIn12.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpNotIn, Values: []string{"1", "2"}}))
Expect(greaterThan1.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpGt, Values: []string{"1"}}))
Expect(greaterThan9.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpGt, Values: []string{"9"}}))
Expect(lessThan1.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpLt, Values: []string{"1"}}))
Expect(lessThan9.NodeSelectorRequirement()).To(Equal(v1.NodeSelectorRequirement{Key: "key", Operator: v1.NodeSelectorOpLt, Values: []string{"9"}}))
})
})
})
| 466 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestScheduling(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Scheduling")
}
| 28 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"github.com/samber/lo"
"go.uber.org/multierr"
v1 "k8s.io/api/core/v1"
cloudproviderapi "k8s.io/cloud-provider/api"
)
var KnownEphemeralTaints = []v1.Taint{
{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule},
{Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoSchedule},
{Key: cloudproviderapi.TaintExternalCloudProvider, Effect: v1.TaintEffectNoSchedule, Value: "true"},
}
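// withoutEphemeralTaints is an illustrative sketch (not part of the original source) showing one
// way a caller might strip the well-known ephemeral taints from a node's taint list before
// evaluating scheduling constraints, since these taints are expected to clear once the node is ready.
func withoutEphemeralTaints(nodeTaints []v1.Taint) Taints {
	return lo.Reject(nodeTaints, func(taint v1.Taint, _ int) bool {
		return lo.ContainsBy(KnownEphemeralTaints, func(known v1.Taint) bool {
			return known.MatchTaint(&taint)
		})
	})
}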
// Taints is a decorated alias type for []v1.Taint
type Taints []v1.Taint
// Tolerates returns nil if the pod tolerates all of the taints; otherwise it returns an error
// aggregating every taint the pod does not tolerate.
func (ts Taints) Tolerates(pod *v1.Pod) (errs error) {
for i := range ts {
taint := ts[i]
tolerates := false
for _, t := range pod.Spec.Tolerations {
tolerates = tolerates || t.ToleratesTaint(&taint)
}
if !tolerates {
errs = multierr.Append(errs, fmt.Errorf("did not tolerate %s=%s:%s", taint.Key, taint.Value, taint.Effect))
}
}
return errs
}
// Merge returns a copy of the receiver's taints with any taints from `with` appended, skipping
// taints that already match an existing entry.
func (ts Taints) Merge(with Taints) Taints {
res := lo.Map(ts, func(t v1.Taint, _ int) v1.Taint {
return t
})
for _, taint := range with {
if _, ok := lo.Find(res, func(t v1.Taint) bool {
return taint.MatchTaint(&t)
}); !ok {
res = append(res, taint)
}
}
return res
}
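// exampleTolerates is an illustrative sketch (not part of the original source) showing how a
// caller might combine a node's taints with the well-known ephemeral taints and check whether a
// hypothetical pod tolerates all of them; a nil return means every taint is tolerated.
func exampleTolerates(pod *v1.Pod, nodeTaints Taints) error {
	// Merge returns a copy, so nodeTaints itself is left unmodified.
	return nodeTaints.Merge(KnownEphemeralTaints).Tolerates(pod)
}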
| 64 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"context"
"fmt"
"sort"
"github.com/samber/lo"
csitranslation "k8s.io/csi-translation-lib"
"knative.dev/pkg/logging"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/karpenter-core/pkg/utils/pretty"
)
const (
IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
)
// translator is a CSI Translator that translates in-tree plugin names to their out-of-tree CSI driver names
var translator = csitranslation.New()
// changeMonitor is a change monitor for global volumeUsage logging
var changeMonitor = pretty.NewChangeMonitor()
// VolumeUsage tracks volume usage on a per-node basis. The number of volumes that can be mounted varies by instance
// type, so we track how many volumes each storage driver has mounted in order to determine which pods can still
// schedule to which nodes.
type VolumeUsage struct {
volumes volumes
podVolumes map[types.NamespacedName]volumes
}
func NewVolumeUsage() *VolumeUsage {
return &VolumeUsage{
volumes: volumes{},
podVolumes: map[types.NamespacedName]volumes{},
}
}
// volumes maps a storage driver (CSI driver or provisioner) name to the set of volume IDs mounted through it
type volumes map[string]sets.String
func (u volumes) Add(provisioner string, pvcID string) {
existing, ok := u[provisioner]
if !ok {
existing = sets.NewString()
u[provisioner] = existing
}
existing.Insert(pvcID)
}
func (u volumes) union(vol volumes) volumes {
cp := volumes{}
for k, v := range u {
cp[k] = sets.NewString(v.List()...)
}
for k, v := range vol {
existing, ok := cp[k]
if !ok {
existing = sets.NewString()
cp[k] = existing
}
existing.Insert(v.List()...)
}
return cp
}
func (u volumes) insert(volumes volumes) {
for k, v := range volumes {
existing, ok := u[k]
if !ok {
existing = sets.NewString()
u[k] = existing
}
existing.Insert(v.List()...)
}
}
func (u volumes) copy() volumes {
cp := volumes{}
for k, v := range u {
cp[k] = sets.NewString(v.List()...)
}
return cp
}
func (v *VolumeUsage) Add(ctx context.Context, kubeClient client.Client, pod *v1.Pod) {
podVolumes, err := v.validate(ctx, kubeClient, pod)
if err != nil {
logging.FromContext(ctx).Errorf("inconsistent state error adding volume, %s, please file an issue", err)
}
v.podVolumes[client.ObjectKeyFromObject(pod)] = podVolumes
v.volumes = v.volumes.union(podVolumes)
}
// VolumeCount stores a mapping between the driver name that provides volumes
// and the number of volumes associated with that driver
type VolumeCount map[string]int
// Exceeds returns true if the volume count exceeds the limits provided. If there is no value for a storage provider, it
// is treated as unlimited.
func (c VolumeCount) Exceeds(limits VolumeCount) bool {
for k, v := range c {
limit, hasLimit := limits[k]
if !hasLimit {
continue
}
if v > limit {
return true
}
}
return false
}
// Fits returns true if the rhs volume counts fit within the limits represented by c. Drivers without a limit in c
// are treated as unlimited.
func (c VolumeCount) Fits(rhs VolumeCount) bool {
for k, v := range rhs {
limit, hasLimit := c[k]
if !hasLimit {
continue
}
if v > limit {
return false
}
}
return true
}
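// exampleVolumeCountChecks is an illustrative sketch (not part of the original source) of the
// Exceeds/Fits semantics using hypothetical driver names and limits; drivers that are missing
// from the limits map are treated as unlimited.
func exampleVolumeCountChecks() (bool, bool) {
	usage := VolumeCount{"ebs.csi.aws.com": 26, "efs.csi.aws.com": 3} // hypothetical per-driver usage
	limits := VolumeCount{"ebs.csi.aws.com": 25}                      // hypothetical per-node attach limit
	return usage.Exceeds(limits), limits.Fits(usage)                  // true (26 > 25), false (26 does not fit)
}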
func (v *VolumeUsage) Validate(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (VolumeCount, error) {
podVolumes, err := v.validate(ctx, kubeClient, pod)
if err != nil {
return nil, err
}
result := VolumeCount{}
for k, v := range v.volumes.union(podVolumes) {
result[k] += len(v)
}
return result, nil
}
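// exampleVolumeUsageFlow is an illustrative sketch (not part of the original source) of the typical
// lifecycle: record a pod's volumes, validate a candidate pod against hypothetical per-driver
// limits, and drop the pod's usage once it goes away.
func exampleVolumeUsageFlow(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (bool, error) {
	usage := NewVolumeUsage()
	usage.Add(ctx, kubeClient, pod)
	counts, err := usage.Validate(ctx, kubeClient, pod)
	if err != nil {
		return false, err
	}
	limits := VolumeCount{"ebs.csi.aws.com": 25} // hypothetical per-node attach limit
	defer usage.DeletePod(client.ObjectKeyFromObject(pod))
	return counts.Exceeds(limits), nil
}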
//nolint:gocyclo
func (v *VolumeUsage) validate(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (volumes, error) {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("pod", pod.Name))
podPVCs := volumes{}
defaultStorageClassName, err := v.discoverDefaultStorageClassName(ctx, kubeClient)
if err != nil {
return nil, fmt.Errorf("discovering default storage class, %w", err)
}
for _, volume := range pod.Spec.Volumes {
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("volume", volume.Name))
var pvcID string
var storageClassName *string
var volumeName string
var pvc v1.PersistentVolumeClaim
if volume.PersistentVolumeClaim != nil {
if err := kubeClient.Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: volume.PersistentVolumeClaim.ClaimName}, &pvc); err != nil {
return nil, err
}
pvcID = fmt.Sprintf("%s/%s", pod.Namespace, volume.PersistentVolumeClaim.ClaimName)
storageClassName = pvc.Spec.StorageClassName
if storageClassName == nil || *storageClassName == "" {
storageClassName = defaultStorageClassName
}
volumeName = pvc.Spec.VolumeName
} else if volume.Ephemeral != nil {
// generated name per https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#persistentvolumeclaim-naming
pvcID = fmt.Sprintf("%s/%s-%s", pod.Namespace, pod.Name, volume.Name)
storageClassName = volume.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
if storageClassName == nil || *storageClassName == "" {
storageClassName = defaultStorageClassName
}
volumeName = volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeName
} else {
continue
}
driverName, err := v.resolveDriver(ctx, kubeClient, volumeName, storageClassName)
if err != nil {
return nil, err
}
// might be a non-CSI driver, something we don't currently handle
if driverName != "" {
podPVCs.Add(driverName, pvcID)
}
}
return podPVCs, nil
}
func (v *VolumeUsage) discoverDefaultStorageClassName(ctx context.Context, kubeClient client.Client) (*string, error) {
storageClassList := &storagev1.StorageClassList{}
if err := kubeClient.List(ctx, storageClassList); err != nil {
return nil, err
}
// Find all StorageClasses that have the default annotation
defaults := lo.Filter(storageClassList.Items, func(sc storagev1.StorageClass, _ int) bool {
return sc.Annotations[IsDefaultStorageClassAnnotation] == "true"
})
if len(defaults) == 0 {
return nil, nil
}
// Sort the default StorageClasses by timestamp and take the newest one
// https://github.com/kubernetes/kubernetes/pull/110559
sort.Slice(defaults, func(i, j int) bool {
return defaults[i].CreationTimestamp.After(defaults[j].CreationTimestamp.Time)
})
return lo.ToPtr(defaults[0].Name), nil
}
// resolveDriver resolves the storage driver name in the following order:
// 1. If the PersistentVolume associated with the pod volume specifies a CSI driver in its spec, use that driver name
// 2. Otherwise, if the StorageClass associated with the volume has a Provisioner, use the provisioner name
func (v *VolumeUsage) resolveDriver(ctx context.Context, kubeClient client.Client, volumeName string, storageClassName *string) (string, error) {
// We can track the volume usage by the CSI Driver name which is pulled from the storage class for dynamic
// volumes, or if it's bound/static we can pull the volume name
if volumeName != "" {
driverName, err := v.driverFromVolume(ctx, kubeClient, volumeName)
if err != nil {
return "", err
}
if driverName != "" {
return driverName, nil
}
}
if storageClassName != nil && *storageClassName != "" {
driverName, err := v.driverFromSC(ctx, kubeClient, *storageClassName)
if err != nil {
return "", err
}
if driverName != "" {
return driverName, nil
}
}
// Driver name wasn't able to resolve for this volume. In this case, we just ignore the
// volume and move on to the other volumes that the pod has
return "", nil
}
// driverFromSC resolves the storage driver name by getting the Provisioner name from the StorageClass
func (v *VolumeUsage) driverFromSC(ctx context.Context, kubeClient client.Client, storageClassName string) (string, error) {
var sc storagev1.StorageClass
if err := kubeClient.Get(ctx, client.ObjectKey{Name: storageClassName}, &sc); err != nil {
return "", err
}
// Check if the provisioner name is an in-tree plugin name
if csiName, err := translator.GetCSINameFromInTreeName(sc.Provisioner); err == nil {
if changeMonitor.HasChanged(fmt.Sprintf("sc/%s", storageClassName), nil) {
logging.FromContext(ctx).With("storage-class", sc.Name, "provisioner", sc.Provisioner).Errorf("StorageClass .spec.provisioner uses an in-tree storage plugin which is unsupported by Karpenter and is deprecated by Kubernetes. Scale-ups may fail because Karpenter will not discover driver limits. Create a new StorageClass with a .spec.provisioner referencing the CSI driver plugin name '%s'.", csiName)
}
}
return sc.Provisioner, nil
}
// driverFromVolume resolves the storage driver name by getting the CSI spec from inside the PersistentVolume
func (v *VolumeUsage) driverFromVolume(ctx context.Context, kubeClient client.Client, volumeName string) (string, error) {
var pv v1.PersistentVolume
if err := kubeClient.Get(ctx, client.ObjectKey{Name: volumeName}, &pv); err != nil {
return "", err
}
if pv.Spec.CSI != nil {
return pv.Spec.CSI.Driver, nil
} else if pv.Spec.AWSElasticBlockStore != nil {
if changeMonitor.HasChanged(fmt.Sprintf("pv/%s", pv.Name), nil) {
logging.FromContext(ctx).With("persistent-volume", pv.Name).Errorf("PersistentVolume source 'AWSElasticBlockStore' uses an in-tree storage plugin which is unsupported by Karpenter and is deprecated by Kubernetes. Scale-ups may fail because Karpenter will not discover driver limits. Use a PersistentVolume that references the 'CSI' volume source for Karpenter auto-scaling support.")
}
}
return "", nil
}
func (v *VolumeUsage) DeletePod(key types.NamespacedName) {
delete(v.podVolumes, key)
// a volume can be referenced by multiple pods, so we rebuild the aggregate usage from the remaining per-pod sets
v.volumes = volumes{}
for _, c := range v.podVolumes {
v.volumes.insert(c)
}
}
func (v *VolumeUsage) DeepCopy() *VolumeUsage {
if v == nil {
return nil
}
out := &VolumeUsage{}
v.DeepCopyInto(out)
return out
}
func (v *VolumeUsage) DeepCopyInto(out *VolumeUsage) {
out.volumes = v.volumes.copy()
out.podVolumes = map[types.NamespacedName]volumes{}
for k, v := range v.podVolumes {
out.podVolumes[k] = v.copy()
}
}
| 312 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"context"
"fmt"
"time"
"github.com/avast/retry-go"
"github.com/samber/lo"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// CacheSyncingClient exists for tests that need to use custom fieldSelectors (thus, they need a client cache)
// and also need consistency in their testing by waiting for caches to sync after performing WRITE operations
// NOTE: This cache sync doesn't sync with third-party operations on the api-server
type CacheSyncingClient struct {
client.Client
}
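// newCacheSyncingClient is an illustrative sketch (not part of the original source) showing how a
// test might wrap a cache-backed controller-runtime client so that WRITE operations only return
// once the underlying cache has observed them.
func newCacheSyncingClient(cached client.Client) client.Client {
	// Create/Update/Patch/Delete/DeleteAllOf on the returned client poll the cache (bounded by
	// pollingOptions, ~1s) before returning, so subsequent reads in the test see the write.
	return &CacheSyncingClient{Client: cached}
}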
// If polling times out, we assume the cache has already advanced past the resource version produced by
// the WRITE operation we just performed, so we stop waiting rather than fail.
var pollingOptions = []retry.Option{
retry.Attempts(100), // This whole poll should take ~1s
retry.Delay(time.Millisecond * 10),
retry.DelayType(retry.FixedDelay),
}
func (c *CacheSyncingClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
if err := c.Client.Create(ctx, obj, opts...); err != nil {
return err
}
_ = retry.Do(func() error {
if err := c.Client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
return fmt.Errorf("getting object, %w", err)
}
return nil
}, pollingOptions...)
return nil
}
func (c *CacheSyncingClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
if err := c.Client.Delete(ctx, obj, opts...); err != nil {
return err
}
_ = retry.Do(func() error {
if err := c.Client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
if errors.IsNotFound(err) {
return nil
}
return fmt.Errorf("getting object, %w", err)
}
if !obj.GetDeletionTimestamp().IsZero() {
return nil
}
return fmt.Errorf("object still exists")
}, pollingOptions...)
return nil
}
func (c *CacheSyncingClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
if err := c.Client.Update(ctx, obj, opts...); err != nil {
return err
}
_ = retry.Do(func() error {
return objectSynced(ctx, c.Client, obj)
}, pollingOptions...)
return nil
}
func (c *CacheSyncingClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
if err := c.Client.Patch(ctx, obj, patch, opts...); err != nil {
return err
}
_ = retry.Do(func() error {
return objectSynced(ctx, c.Client, obj)
}, pollingOptions...)
return nil
}
func (c *CacheSyncingClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
options := &client.DeleteAllOfOptions{}
for _, o := range opts {
o.ApplyToDeleteAllOf(options)
}
if err := c.Client.DeleteAllOf(ctx, obj, opts...); err != nil {
return err
}
metaList := &metav1.PartialObjectMetadataList{}
metaList.SetGroupVersionKind(lo.Must(apiutil.GVKForObject(obj, c.Scheme())))
_ = retry.Do(func() error {
listOptions := []client.ListOption{client.Limit(1)}
if options.ListOptions.Namespace != "" {
listOptions = append(listOptions, client.InNamespace(options.ListOptions.Namespace))
}
if err := c.Client.List(ctx, metaList, listOptions...); err != nil {
return fmt.Errorf("listing objects, %w", err)
}
if len(metaList.Items) != 0 {
return fmt.Errorf("objects still exist")
}
return nil
}, pollingOptions...)
return nil
}
func (c *CacheSyncingClient) Status() client.StatusWriter {
return &cacheSyncingStatusWriter{
client: c.Client,
}
}
type cacheSyncingStatusWriter struct {
client client.Client
}
func (c *cacheSyncingStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
if err := c.client.Status().Update(ctx, obj, opts...); err != nil {
return err
}
_ = retry.Do(func() error {
return objectSynced(ctx, c.client, obj)
}, pollingOptions...)
return nil
}
func (c *cacheSyncingStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
if err := c.client.Status().Patch(ctx, obj, patch, opts...); err != nil {
return err
}
_ = retry.Do(func() error {
return objectSynced(ctx, c.client, obj)
}, pollingOptions...)
return nil
}
func objectSynced(ctx context.Context, c client.Client, obj client.Object) error {
temp := obj.DeepCopyObject().(client.Object)
if err := c.Get(ctx, client.ObjectKeyFromObject(obj), temp); err != nil {
// If the object isn't found, we assume that the cache was synced since the Update operation must have caused
// the object to get completely removed (like a finalizer update)
return client.IgnoreNotFound(fmt.Errorf("getting object, %w", err))
}
if obj.GetResourceVersion() != temp.GetResourceVersion() {
return fmt.Errorf("object hasn't updated")
}
return nil
}
| 166 |
karpenter-core | aws | Go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"fmt"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DaemonSetOptions customizes a DaemonSet.
type DaemonSetOptions struct {
metav1.ObjectMeta
Selector map[string]string
PodOptions PodOptions
}
// DaemonSet creates a test DaemonSet with defaults that can be overridden by DaemonSetOptions.
// Overrides are applied in order, with a last write wins semantic.
func DaemonSet(overrides ...DaemonSetOptions) *appsv1.DaemonSet {
options := DaemonSetOptions{}
for _, opts := range overrides {
if err := mergo.Merge(&options, opts, mergo.WithOverride); err != nil {
panic(fmt.Sprintf("Failed to merge daemonset options: %s", err))
}
}
if options.Name == "" {
options.Name = RandomName()
}
if options.Namespace == "" {
options.Namespace = "default"
}
if options.Selector == nil {
options.Selector = map[string]string{"app": options.Name}
}
return &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{Name: options.Name, Namespace: options.Namespace},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: options.Selector},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{Labels: options.Selector},
Spec: Pod(options.PodOptions).Spec,
},
},
}
}
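// exampleDaemonSet is an illustrative sketch (not part of the original source) showing how a test
// might construct a DaemonSet fixture; the name and namespace below are hypothetical.
func exampleDaemonSet() *appsv1.DaemonSet {
	return DaemonSet(DaemonSetOptions{
		ObjectMeta: metav1.ObjectMeta{Name: "logging-agent", Namespace: "kube-system"},
	})
}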
| 63 |