repo_name (stringlengths 1–52) | repo_creator (stringclasses, 6 values) | programming_language (stringclasses, 4 values) | code (stringlengths 0–9.68M) | num_lines (int64, 1–234k)
---|---|---|---|---|
eks-anywhere | aws | Go | package config_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/config"
)
func TestNewVsphereUserConfig(t *testing.T) {
wantUsername := "FOO"
wantPassword := "BAR"
wantEnv := map[string]string{
config.EksavSphereUsernameKey: wantUsername,
config.EksavSpherePasswordKey: wantPassword,
config.EksavSphereCPUsernameKey: "",
config.EksavSphereCPPasswordKey: "",
}
for k, v := range wantEnv {
t.Setenv(k, v)
}
vusc := config.NewVsphereUserConfig()
if vusc.EksaVsphereUsername != wantUsername {
t.Fatalf("vusc.EksaVsphereUsername = %s, want %s", vusc.EksaVsphereUsername, wantUsername)
}
if vusc.EksaVsphereCPUsername != wantUsername {
t.Fatalf("vusc.EksaVsphereCPUsername = %s, want %s", vusc.EksaVsphereCPUsername, wantUsername)
}
if vusc.EksaVspherePassword != wantPassword {
t.Fatalf("vusc.EksaVspherePassword = %s, want %s", vusc.EksaVspherePassword, wantPassword)
}
if vusc.EksaVsphereCPPassword != wantPassword {
t.Fatalf("vusc.EksaVsphereCPPassword = %s, want %s", vusc.EksaVsphereCPPassword, wantPassword)
}
}
| 37 |
eks-anywhere | aws | Go | package constants
// Namespace constants.
const (
EksaSystemNamespace = "eksa-system"
EksaDiagnosticsNamespace = "eksa-diagnostics"
EksaControllerManagerDeployment = "eksa-controller-manager"
CapdSystemNamespace = "capd-system"
CapcSystemNamespace = "capc-system"
CapiKubeadmBootstrapSystemNamespace = "capi-kubeadm-bootstrap-system"
CapiKubeadmControlPlaneSystemNamespace = "capi-kubeadm-control-plane-system"
CapiSystemNamespace = "capi-system"
CapiWebhookSystemNamespace = "capi-webhook-system"
CapvSystemNamespace = "capv-system"
CaptSystemNamespace = "capt-system"
CapaSystemNamespace = "capa-system"
CapasSystemNamespace = "capas-system"
CapxSystemNamespace = "capx-system"
CertManagerNamespace = "cert-manager"
DefaultNamespace = "default"
EtcdAdmBootstrapProviderSystemNamespace = "etcdadm-bootstrap-provider-system"
EtcdAdmControllerSystemNamespace = "etcdadm-controller-system"
KubeNodeLeaseNamespace = "kube-node-lease"
KubePublicNamespace = "kube-public"
KubeSystemNamespace = "kube-system"
LocalPathStorageNamespace = "local-path-storage"
EtcdAdmBootstrapProviderName = "bootstrap-etcdadm-bootstrap"
EtcdadmControllerProviderName = "bootstrap-etcdadm-controller"
DefaultHttpsPort = "443"
DefaultWorkerNodeGroupName = "md-0"
DefaultNodeCidrMaskSize = 24
VSphereProviderName = "vsphere"
DockerProviderName = "docker"
AWSProviderName = "aws"
SnowProviderName = "snow"
TinkerbellProviderName = "tinkerbell"
CloudStackProviderName = "cloudstack"
NutanixProviderName = "nutanix"
// DefaultNutanixPrismCentralPort is the default port for Nutanix Prism Central.
DefaultNutanixPrismCentralPort = 9440
VSphereCredentialsName = "vsphere-credentials"
NutanixCredentialsName = "nutanix-credentials"
EksaLicenseName = "eksa-license"
EksaPackagesName = "eksa-packages"
CloudstackAnnotationSuffix = "cloudstack.anywhere.eks.amazonaws.com/v1alpha1"
FailureDomainLabelName = "cluster.x-k8s.io/failure-domain"
// CloudstackFailureDomainPlaceholder is the provider-specific keyword placeholder.
CloudstackFailureDomainPlaceholder = "ds.meta_data.failuredomain"
// DefaultCoreEKSARegistry is the default registry for eks-a core artifacts.
DefaultCoreEKSARegistry = "public.ecr.aws"
// DefaultCuratedPackagesRegistryRegex matches the default registry for curated packages in all regions.
DefaultCuratedPackagesRegistryRegex = "783794618700.dkr.ecr.*.amazonaws.com"
// Provider specific env vars.
VSphereUsernameKey = "VSPHERE_USERNAME"
VSpherePasswordKey = "VSPHERE_PASSWORD"
GovcUsernameKey = "GOVC_USERNAME"
GovcPasswordKey = "GOVC_PASSWORD"
SnowCredentialsKey = "AWS_B64ENCODED_CREDENTIALS"
SnowCertsKey = "AWS_B64ENCODED_CA_BUNDLES"
NutanixUsernameKey = "NUTANIX_USER"
NutanixPasswordKey = "NUTANIX_PASSWORD"
EksaNutanixUsernameKey = "EKSA_NUTANIX_USERNAME"
EksaNutanixPasswordKey = "EKSA_NUTANIX_PASSWORD"
RegistryUsername = "REGISTRY_USERNAME"
RegistryPassword = "REGISTRY_PASSWORD"
SecretKind = "Secret"
ConfigMapKind = "ConfigMap"
ClusterResourceSetKind = "ClusterResourceSet"
BottlerocketDefaultUser = "ec2-user"
UbuntuDefaultUser = "capv"
)
type Operation int
const (
Create Operation = 0
Upgrade Operation = 1
Delete Operation = 2
)
| 89 |
eks-anywhere | aws | Go | package controller
import (
"context"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
)
// GetCAPICluster reads a cluster-api Cluster for an eks-a cluster using a kube client
// If the CAPI cluster is not found, the method returns (nil, nil).
func GetCAPICluster(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) (*clusterv1.Cluster, error) {
capiClusterName := clusterapi.ClusterName(cluster)
capiCluster := &clusterv1.Cluster{}
key := types.NamespacedName{Namespace: constants.EksaSystemNamespace, Name: capiClusterName}
err := client.Get(ctx, key, capiCluster)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return capiCluster, nil
}
// CapiClusterObjectKey generates an ObjectKey for the CAPI cluster owned by
// the provided eks-a cluster.
func CapiClusterObjectKey(cluster *anywherev1.Cluster) client.ObjectKey {
// TODO: we should consider storing a reference to the CAPI cluster in the eksa cluster status
return client.ObjectKey{
Name: clusterapi.ClusterName(cluster),
Namespace: constants.EksaSystemNamespace,
}
}
// GetKubeadmControlPlane reads a cluster-api KubeadmControlPlane for an eks-a cluster using a kube client
// If the KubeadmControlPlane is not found, the method returns (nil, nil).
func GetKubeadmControlPlane(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) (*controlplanev1.KubeadmControlPlane, error) {
kubeadmControlPlane, err := KubeadmControlPlane(ctx, client, cluster)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return kubeadmControlPlane, nil
}
// KubeadmControlPlane reads a cluster-api KubeadmControlPlane for an eks-a cluster using a kube client.
func KubeadmControlPlane(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) (*controlplanev1.KubeadmControlPlane, error) {
kubeadmControlPlane := &controlplanev1.KubeadmControlPlane{}
if err := client.Get(ctx, CAPIKubeadmControlPlaneKey(cluster), kubeadmControlPlane); err != nil {
return nil, err
}
return kubeadmControlPlane, nil
}
// CAPIKubeadmControlPlaneKey generates an ObjectKey for the CAPI Kubeadm control plane owned by
// the provided eks-a cluster.
func CAPIKubeadmControlPlaneKey(cluster *anywherev1.Cluster) client.ObjectKey {
return client.ObjectKey{
Name: clusterapi.KubeadmControlPlaneName(cluster),
Namespace: constants.EksaSystemNamespace,
}
}
// GetMachineDeployment reads a cluster-api MachineDeployment for an eks-a cluster using a kube client.
// If the MachineDeployment is not found, the method returns (nil, nil).
func GetMachineDeployment(ctx context.Context, client client.Client, machineDeploymentName string) (*clusterv1.MachineDeployment, error) {
machineDeployment := &clusterv1.MachineDeployment{}
key := types.NamespacedName{Namespace: constants.EksaSystemNamespace, Name: machineDeploymentName}
err := client.Get(ctx, key, machineDeployment)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return machineDeployment, nil
}
// GetMachineDeployments reads all cluster-api MachineDeployments for an eks-a cluster using a kube client.
func GetMachineDeployments(ctx context.Context, c client.Client, cluster *anywherev1.Cluster) ([]clusterv1.MachineDeployment, error) {
machineDeployments := &clusterv1.MachineDeploymentList{}
err := c.List(ctx, machineDeployments, client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name}, client.InNamespace(constants.EksaSystemNamespace))
if err != nil {
return nil, err
}
return machineDeployments.Items, nil
}
| 105 |
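GetCAPICluster, GetKubeadmControlPlane, and GetMachineDeployment above all return (nil, nil) when the object is missing, so callers need a nil check before dereferencing. A minimal sketch of that calling pattern follows; the waitForCAPICluster wrapper and its package are illustrative assumptions, not part of the repo.
package example
import (
    "context"

    "github.com/go-logr/logr"
    "sigs.k8s.io/controller-runtime/pkg/client"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/controller"
)
// waitForCAPICluster is a hypothetical caller: it treats the (nil, nil)
// not-found result as "not created yet" instead of an error.
func waitForCAPICluster(ctx context.Context, c client.Client, log logr.Logger, cluster *anywherev1.Cluster) error {
    capiCluster, err := controller.GetCAPICluster(ctx, c, cluster)
    if err != nil {
        return err
    }
    if capiCluster == nil {
        // Not found: the CAPI objects have not been created yet.
        log.Info("CAPI cluster not found yet")
        return nil
    }
    log.Info("CAPI cluster found", "name", capiCluster.Name)
    return nil
}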
eks-anywhere | aws | Go | package controller_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/controller"
)
func TestGetCAPIClusterSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
capiCluster := capiCluster()
client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
g.Expect(controller.GetCAPICluster(ctx, client, eksaCluster)).To(Equal(capiCluster))
}
func TestGetCAPIClusterNoCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()
g.Expect(controller.GetCAPICluster(ctx, client, eksaCluster)).To(BeNil())
}
func TestGetCAPIClusterError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
// This should make the client fail because CRDs are not registered
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
_, err := controller.GetCAPICluster(ctx, client, eksaCluster)
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}
func TestGetKubeadmControlPlaneSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
kubeadmControlPlane := kubeadmControlPlane()
client := fake.NewClientBuilder().WithObjects(eksaCluster, kubeadmControlPlane).Build()
g.Expect(controller.GetKubeadmControlPlane(ctx, client, eksaCluster)).To(Equal(kubeadmControlPlane))
}
func TestGetKubeadmControlPlaneMissingKubeadmControlPlane(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()
g.Expect(controller.GetKubeadmControlPlane(ctx, client, eksaCluster)).To(BeNil())
}
func TestGetKubeadmControlPlaneError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
// This should make the client fail because CRDs are not registered
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
_, err := controller.GetKubeadmControlPlane(ctx, client, eksaCluster)
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}
func TestGetMachineDeploymentSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
machineDeployment := machineDeployment()
client := fake.NewClientBuilder().WithObjects(eksaCluster, machineDeployment).Build()
g.Expect(controller.GetMachineDeployment(ctx, client, "my-cluster")).To(Equal(machineDeployment))
}
func TestGetMachineDeploymentMissingMD(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()
g.Expect(controller.GetMachineDeployment(ctx, client, "test")).To(BeNil())
}
func TestGetMachineDeploymentsSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
md1 := machineDeployment()
md1.Name = "md-1"
md1.Labels = map[string]string{
clusterv1.ClusterNameLabel: eksaCluster.Name,
}
md2 := md1.DeepCopy()
md2.Name = "md-2"
client := fake.NewClientBuilder().WithObjects(eksaCluster, md1, md2).Build()
g.Expect(controller.GetMachineDeployments(ctx, client, eksaCluster)).To(Equal([]clusterv1.MachineDeployment{*md1, *md2}))
}
func TestGetMachineDeploymentsMachineDeploymentsInDifferentClusters(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
machineDeployment1 := machineDeployment()
machineDeployment1.Name = "md-1"
machineDeployment1.Labels = map[string]string{
clusterv1.ClusterNameLabel: eksaCluster.Name,
}
machineDeployment2 := machineDeployment()
machineDeployment2.Name = "md-2"
machineDeployment2.Labels = map[string]string{
clusterv1.ClusterNameLabel: "other-cluster",
}
client := fake.NewClientBuilder().WithObjects(eksaCluster, machineDeployment1, machineDeployment2).Build()
g.Expect(controller.GetMachineDeployments(ctx, client, eksaCluster)).To(Equal([]clusterv1.MachineDeployment{*machineDeployment1}))
}
func TestGetMachineDeploymentsError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
// This should make the client fail because CRDs are not registered
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
_, err := controller.GetMachineDeployments(ctx, client, eksaCluster)
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}
func TestGetMachineDeploymentError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
// This should make the client fail because CRDs are not registered
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
_, err := controller.GetMachineDeployment(ctx, client, "test")
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}
func TestGetCapiClusterObjectKey(t *testing.T) {
g := NewWithT(t)
eksaCluster := eksaCluster()
expected := types.NamespacedName{
Name: "my-cluster",
Namespace: "eksa-system",
}
key := controller.CapiClusterObjectKey(eksaCluster)
g.Expect(key).To(Equal(expected))
}
func eksaCluster() *anywherev1.Cluster {
return &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
},
}
}
func capiCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: clusterv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
},
}
}
func kubeadmControlPlane() *controlplanev1.KubeadmControlPlane {
return &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmControlPlane",
APIVersion: controlplanev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
},
}
}
func machineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "MachineDeployment",
APIVersion: clusterv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
},
}
}
| 223 |
eks-anywhere | aws | Go | package controller
import (
"time"
ctrl "sigs.k8s.io/controller-runtime"
)
// Result represents the result of a reconciliation.
// It allows expressing the intent to interrupt the reconciliation
// without necessarily requeueing the request.
type Result struct {
Result *ctrl.Result
}
// ToCtrlResult converts Result to a controller-runtime result.
func (r Result) ToCtrlResult() ctrl.Result {
if r.Result == nil {
return ctrl.Result{}
}
return *r.Result
}
// Return reports whether the Result expresses the intent to interrupt
// the reconciliation process.
func (r *Result) Return() bool {
return r.Result != nil
}
// ResultWithReturn creates a new Result that interrupts the reconciliation
// without requeueing.
func ResultWithReturn() Result {
return Result{Result: &ctrl.Result{}}
}
// ResultWithRequeue creates a new Result that requeues the request after
// the provided duration.
func ResultWithRequeue(after time.Duration) Result {
return Result{Result: &ctrl.Result{RequeueAfter: after}}
}
| 42 |
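The Result wrapper above lets a helper signal an early exit (with or without a requeue) while keeping the controller-runtime return type at the edges. A small sketch, assuming a hypothetical ready flag and helper function not present in the repo:
package example
import (
    "context"
    "time"

    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/aws/eks-anywhere/pkg/controller"
)
// reconcileSomething is illustrative: it requeues after 10 seconds when the
// hypothetical ready flag is false, and otherwise lets reconciliation continue.
func reconcileSomething(ctx context.Context, ready bool) (ctrl.Result, error) {
    var r controller.Result
    if !ready {
        r = controller.ResultWithRequeue(10 * time.Second)
    }
    if r.Return() {
        // Interrupt and requeue without treating this as an error.
        return r.ToCtrlResult(), nil
    }
    return ctrl.Result{}, nil
}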
eks-anywhere | aws | Go | package controller_test
import (
"testing"
"time"
. "github.com/onsi/gomega"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/aws/eks-anywhere/pkg/controller"
)
func TestResultToCtrlResult(t *testing.T) {
tests := []struct {
name string
in controller.Result
want ctrl.Result
}{
{
name: "no result",
in: controller.Result{},
want: ctrl.Result{},
},
{
name: "requeue result",
in: controller.Result{
Result: &ctrl.Result{
Requeue: true,
},
},
want: ctrl.Result{
Requeue: true,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.in.ToCtrlResult()).To(Equal(tt.want))
})
}
}
func TestResultToCtrlReturn(t *testing.T) {
tests := []struct {
name string
in controller.Result
want bool
}{
{
name: "no return",
in: controller.Result{},
want: false,
},
{
name: "return",
in: controller.Result{
Result: &ctrl.Result{
Requeue: true,
},
},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.in.Return()).To(Equal(tt.want))
})
}
}
func TestResultWithReturn(t *testing.T) {
g := NewWithT(t)
r := controller.ResultWithReturn()
g.Expect(r.Return()).To(BeTrue())
g.Expect(r.ToCtrlResult().Requeue).To(BeFalse())
}
func TestResultWithRequeue(t *testing.T) {
g := NewWithT(t)
r := controller.ResultWithRequeue(2 * time.Second)
g.Expect(r.Return()).To(BeTrue())
g.Expect(r.ToCtrlResult().RequeueAfter).To(Equal(2 * time.Second))
}
| 86 |
eks-anywhere | aws | Go | package controller
import (
"context"
"github.com/go-logr/logr"
)
// Phase represents a generic reconciliation phase for a cluster spec.
type Phase[O any] func(ctx context.Context, log logr.Logger, obj O) (Result, error)
// PhaseRunner executes Phases in order.
type PhaseRunner[O any] struct {
phases []Phase[O]
}
// NewPhaseRunner creates a new PhaseRunner without any Phases.
func NewPhaseRunner[O any]() PhaseRunner[O] {
return PhaseRunner[O]{}
}
// Register adds a phase to the runner.
func (r PhaseRunner[O]) Register(phases ...Phase[O]) PhaseRunner[O] {
r.phases = append(r.phases, phases...)
return r
}
// Run executes the phases in the order they were registered until a phase
// returns an error or a Result that requests an interruption.
func (r PhaseRunner[O]) Run(ctx context.Context, log logr.Logger, obj O) (Result, error) {
for _, p := range r.phases {
if r, err := p(ctx, log, obj); r.Return() {
return r, nil
} else if err != nil {
return Result{}, err
}
}
return Result{}, nil
}
| 41 |
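The tests in the next file exercise the runner directly; as a complementary sketch, this is how a reconciler might register its own phases. The validate and apply phase bodies are assumptions for illustration only:
package example
import (
    "context"

    "github.com/go-logr/logr"

    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/controller"
)
// validate is a hypothetical phase; returning an empty Result lets the runner continue.
func validate(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
    return controller.Result{}, nil
}
// apply is another hypothetical phase.
func apply(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
    return controller.Result{}, nil
}
func runPhases(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
    // Phases run in registration order; the runner stops early if a phase
    // errors or returns a Result that requests an interruption.
    runner := controller.NewPhaseRunner[*cluster.Spec]().Register(validate, apply)
    return runner.Run(ctx, log, spec)
}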
eks-anywhere | aws | Go | package controller_test
import (
"context"
"errors"
"testing"
"time"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
)
func TestPhaseRunnerRunError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
phase1 := newPhase()
phase3 := newPhase()
r := controller.NewPhaseRunner[*cluster.Spec]().Register(
phase1.run,
phaseReturnError,
phase3.run,
)
_, err := r.Run(ctx, test.NewNullLogger(), &cluster.Spec{})
g.Expect(err).To(HaveOccurred())
g.Expect(phase1.executed).To(BeTrue())
g.Expect(phase3.executed).To(BeFalse())
}
func TestPhaseRunnerRunRequeue(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
phase1 := newPhase()
phase3 := newPhase()
r := controller.NewPhaseRunner[*cluster.Spec]().Register(
phase1.run,
phaseReturnRequeue,
phase3.run,
)
result, err := r.Run(ctx, test.NewNullLogger(), &cluster.Spec{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(phase1.executed).To(BeTrue())
g.Expect(phase3.executed).To(BeFalse())
g.Expect(result.ToCtrlResult().RequeueAfter).To(Equal(1 * time.Second))
}
func TestPhaseRunnerRunAllPhasesFinished(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
phase1 := newPhase()
phase2 := newPhase()
phase3 := newPhase()
r := controller.NewPhaseRunner[*cluster.Spec]().Register(
phase1.run,
phase2.run,
phase3.run,
)
result, err := r.Run(ctx, test.NewNullLogger(), &cluster.Spec{})
g.Expect(err).NotTo(HaveOccurred())
g.Expect(phase1.executed).To(BeTrue())
g.Expect(phase2.executed).To(BeTrue())
g.Expect(phase3.executed).To(BeTrue())
g.Expect(result.Result).To(BeNil())
}
func newPhase() *phase {
return &phase{}
}
type phase struct {
executed bool
}
func (p *phase) run(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
p.executed = true
return controller.Result{}, nil
}
func phaseReturnError(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
return controller.Result{}, errors.New("running phase")
}
func phaseReturnRequeue(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
return controller.ResultWithRequeue(1 * time.Second), nil
}
| 92 |
eks-anywhere | aws | Go | package clientutil
import "sigs.k8s.io/controller-runtime/pkg/client"
// AddAnnotation adds an annotation to the given object.
// If the annotation already exists, it overwrites its value.
func AddAnnotation(o client.Object, key, value string) {
a := o.GetAnnotations()
if a == nil {
a = make(map[string]string, 1)
}
a[key] = value
o.SetAnnotations(a)
}
// AddLabel adds a label to the given object.
// If the label already exists, it overwrites its value.
func AddLabel(o client.Object, key, value string) {
l := o.GetLabels()
if l == nil {
l = make(map[string]string, 1)
}
l[key] = value
o.SetLabels(l)
}
| 26 |
eks-anywhere | aws | Go | package clientutil_test
import (
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
func TestAddAnnotation(t *testing.T) {
tests := []struct {
name string
obj client.Object
key, value string
}{
{
name: "empty annotations",
obj: &corev1.ConfigMap{},
key: "my-annotation",
value: "my-value",
},
{
name: "non empty annotations",
obj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"a": "b",
},
},
},
key: "my-annotation",
value: "my-value",
},
{
name: "annotation present same value",
obj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"my-annotation": "my-value",
},
},
},
key: "my-annotation",
value: "my-value",
},
{
name: "annotation present diff value",
obj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"my-annotation": "other-value",
},
},
},
key: "my-annotation",
value: "my-value",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
clientutil.AddAnnotation(tt.obj, tt.key, tt.value)
g.Expect(tt.obj.GetAnnotations()).To(HaveKeyWithValue(tt.key, tt.value))
})
}
}
func TestAddLabel(t *testing.T) {
tests := []struct {
name string
obj client.Object
key, value string
}{
{
name: "empty labels",
obj: &corev1.ConfigMap{},
key: "my-label",
value: "my-value",
},
{
name: "non empty labels",
obj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"a": "b",
},
},
},
key: "my-label",
value: "my-value",
},
{
name: "label present same value",
obj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"my-label": "my-value",
},
},
},
key: "my-label",
value: "my-value",
},
{
name: "label present diff value",
obj: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"my-label": "other-value",
},
},
},
key: "my-label",
value: "my-value",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
clientutil.AddLabel(tt.obj, tt.key, tt.value)
g.Expect(tt.obj.GetLabels()).To(HaveKeyWithValue(tt.key, tt.value))
})
}
}
| 129 |
eks-anywhere | aws | Go | package clientutil
import (
"context"
"github.com/pkg/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func DeleteYaml(ctx context.Context, c client.Client, yaml []byte) error {
objs, err := YamlToClientObjects(yaml)
if err != nil {
return err
}
return deleteObjects(ctx, c, objs)
}
func deleteObjects(ctx context.Context, c client.Client, objs []client.Object) error {
for _, o := range objs {
if err := deleteObject(ctx, c, o); err != nil {
return err
}
}
return nil
}
func deleteObject(ctx context.Context, c client.Client, obj client.Object) error {
if err := c.Delete(ctx, obj); err != nil {
return errors.Wrapf(err, "deleting object %s, %s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
}
return nil
}
| 36 |
eks-anywhere | aws | Go | package clientutil_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterapiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
func TestDeleteYamlSuccess(t *testing.T) {
tests := []struct {
name string
initialObjs []client.Object
yaml []byte
}{
{
name: "delete single object",
initialObjs: []client.Object{
cluster("cluster-1"),
},
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: default
spec:
controlPlaneEndpoint:
host: 1.1.1.1
port: 8080`),
},
{
name: "delete multiple objects",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: default
spec:
paused: true
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-2
namespace: default
spec:
paused: true`),
initialObjs: []client.Object{
cluster("cluster-1"),
cluster("cluster-2"),
},
},
}
ctx := context.Background()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c := fake.NewClientBuilder().WithObjects(tt.initialObjs...).Build()
g.Expect(clientutil.DeleteYaml(ctx, c, tt.yaml)).To(Succeed(), "Failed to delete with DeleteYaml()")
for _, o := range tt.initialObjs {
key := client.ObjectKey{
Namespace: "default",
Name: o.GetName(),
}
cluster := &clusterapiv1.Cluster{}
err := c.Get(ctx, key, cluster)
g.Expect(apierrors.IsNotFound(err)).To(BeTrue(), "Object should have been deleted")
}
})
}
}
func cluster(name string) *clusterapiv1.Cluster {
c := &clusterapiv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: clusterapiv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
}
return c
}
func TestDeleteYamlError(t *testing.T) {
tests := []struct {
name string
yaml []byte
wantErr string
}{
{
name: "invalid yaml",
yaml: []byte(`x`),
wantErr: "error unmarshaling JSON",
},
{
name: "error deleting",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: default
spec:
paused: true`),
wantErr: "deleting object cluster.x-k8s.io/v1beta1, Kind=Cluster, default/cluster-1",
},
}
ctx := context.Background()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
c := fake.NewClientBuilder().Build()
g.Expect(clientutil.DeleteYaml(ctx, c, tt.yaml)).To(MatchError(ContainSubstring(tt.wantErr)))
})
}
}
| 131 |
eks-anywhere | aws | Go | package clientutil
import (
"context"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)
// KubeClient implements the kubernetes.Client interface using a
// client.Client as the underlying implementation.
type KubeClient struct {
client client.Client
}
func NewKubeClient(client client.Client) *KubeClient {
return &KubeClient{
client: client,
}
}
// Get retrieves an obj for the given name and namespace from the Kubernetes Cluster.
func (c *KubeClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error {
return c.client.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj)
}
// List retrieves a list of objects. On a successful call, the Items field
// in the list will be populated with the result returned from the server.
func (c *KubeClient) List(ctx context.Context, list kubernetes.ObjectList) error {
return c.client.List(ctx, list)
}
// Create saves the object obj in the Kubernetes cluster.
func (c *KubeClient) Create(ctx context.Context, obj kubernetes.Object) error {
return c.client.Create(ctx, obj)
}
// Update updates the given obj in the Kubernetes cluster.
func (c *KubeClient) Update(ctx context.Context, obj kubernetes.Object) error {
return c.client.Update(ctx, obj)
}
// Delete deletes the given obj from Kubernetes cluster.
func (c *KubeClient) Delete(ctx context.Context, obj kubernetes.Object) error {
return c.client.Delete(ctx, obj)
}
// DeleteAllOf deletes all objects of the given type matching the given options.
func (c *KubeClient) DeleteAllOf(ctx context.Context, obj kubernetes.Object, opts ...kubernetes.DeleteAllOfOption) error {
o := &kubernetes.DeleteAllOfOptions{}
for _, opt := range opts {
opt.ApplyToDeleteAllOf(o)
}
clientOptions := &client.DeleteAllOfOptions{}
clientOptions.LabelSelector = labels.SelectorFromValidatedSet(o.HasLabels)
clientOptions.Namespace = o.Namespace
return c.client.DeleteAllOf(ctx, obj, clientOptions)
}
| 63 |
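KubeClient adapts a controller-runtime client.Client to the repo's kubernetes.Client interface. A short sketch of the wrapping, assuming the surrounding readCluster helper (not part of the package):
package example
import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/client"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
// readCluster wraps an existing client.Client and reads an eks-a Cluster by
// name and namespace through the kubernetes.Client-shaped wrapper.
func readCluster(ctx context.Context, c client.Client, name, namespace string) (*anywherev1.Cluster, error) {
    kc := clientutil.NewKubeClient(c)
    cluster := &anywherev1.Cluster{}
    if err := kc.Get(ctx, name, namespace, cluster); err != nil {
        return nil, err
    }
    return cluster, nil
}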
eks-anywhere | aws | Go | package clientutil_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test/envtest"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
func TestKubeClientGet(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(cluster).Build()
client := clientutil.NewKubeClient(cl)
receiveCluster := &anywherev1.Cluster{}
g.Expect(client.Get(ctx, "my-cluster", "default", receiveCluster)).To(Succeed())
g.Expect(receiveCluster).To(Equal(cluster))
}
func TestKubeClientGetNotFound(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects().Build()
client := clientutil.NewKubeClient(cl)
receiveCluster := &anywherev1.Cluster{}
g.Expect(client.Get(ctx, "my-cluster", "default", receiveCluster)).Error()
}
func TestKubeClientList(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cluster1 := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cluster2 := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-2",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(cluster1, cluster2).Build()
client := clientutil.NewKubeClient(cl)
receiveClusters := &anywherev1.ClusterList{}
g.Expect(client.List(ctx, receiveClusters)).To(Succeed())
g.Expect(receiveClusters.Items).To(ConsistOf(*cluster1, *cluster2))
}
func TestKubeClientCreate(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects().Build()
client := clientutil.NewKubeClient(cl)
g.Expect(client.Create(ctx, cluster)).To(Succeed())
api := envtest.NewAPIExpecter(t, cl)
api.ShouldEventuallyExist(ctx, cluster)
}
func TestKubeClientUpdate(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(cluster).Build()
client := clientutil.NewKubeClient(cl)
updatedCluster := cluster.DeepCopy()
updatedCluster.Spec.KubernetesVersion = anywherev1.Kube126
g.Expect(client.Update(ctx, updatedCluster)).To(Succeed())
api := envtest.NewAPIExpecter(t, cl)
api.ShouldEventuallyMatch(ctx, cluster, func(g Gomega) {
g.Expect(cluster).To(BeComparableTo(updatedCluster))
})
}
func TestKubeClientDelete(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(cluster).Build()
client := clientutil.NewKubeClient(cl)
g.Expect(client.Delete(ctx, cluster)).To(Succeed())
api := envtest.NewAPIExpecter(t, cl)
api.ShouldEventuallyNotExist(ctx, cluster)
}
func TestKubeClientDeleteAllOf(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cluster1 := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cluster2 := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster-2",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(cluster1, cluster2).Build()
client := clientutil.NewKubeClient(cl)
opts := &kubernetes.DeleteAllOfOptions{
Namespace: "default",
}
g.Expect(client.DeleteAllOf(ctx, &anywherev1.Cluster{}, opts)).To(Succeed())
api := envtest.NewAPIExpecter(t, cl)
api.ShouldEventuallyNotExist(ctx, cluster1)
api.ShouldEventuallyNotExist(ctx, cluster2)
}
| 194 |
eks-anywhere | aws | Go | package clientutil
import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
func ObjectsToClientObjects[T client.Object](objs []T) []client.Object {
runtimeObjs := make([]client.Object, 0, len(objs))
for _, o := range objs {
runtimeObjs = append(runtimeObjs, o)
}
return runtimeObjs
}
| 15 |
eks-anywhere | aws | Go | package clientutil_test
import (
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
func TestObjectsToClientObjectsKubernetesObjects(t *testing.T) {
g := NewWithT(t)
cm1 := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-1",
},
}
cm2 := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-2",
},
}
objs := []kubernetes.Object{cm1, cm2}
g.Expect(clientutil.ObjectsToClientObjects(objs)).To(ConsistOf(cm1, cm2))
}
func TestObjectsToClientObjectsEnvtestObjects(t *testing.T) {
g := NewWithT(t)
cm1 := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-1",
},
}
cm2 := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "cm-2",
},
}
objs := []client.Object{cm1, cm2}
g.Expect(clientutil.ObjectsToClientObjects(objs)).To(ConsistOf(cm1, cm2))
}
| 48 |
eks-anywhere | aws | Go | package clientutil
import (
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/utils/unstructured"
)
func YamlToClientObjects(yamlObjects []byte) ([]client.Object, error) {
unstructuredObjs, err := unstructured.YamlToUnstructured(yamlObjects)
if err != nil {
return nil, err
}
objs := make([]client.Object, 0, len(unstructuredObjs))
// Use a numbered loop to avoid problems when retrieving the pointer
for i := range unstructuredObjs {
objs = append(objs, &unstructuredObjs[i])
}
return objs, nil
}
| 23 |
eks-anywhere | aws | Go | package clientutil_test
import (
"testing"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
func TestYamlToClientObjects(t *testing.T) {
tests := []struct {
name string
yaml []byte
want map[string]client.Object
}{
{
name: "two objects",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: ns-1
spec:
paused: true
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-2
namespace: ns-1
spec:
controlPlaneEndpoint:
host: 1.1.1.1
port: 8080`),
want: map[string]client.Object{
"cluster-1": &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "cluster.x-k8s.io/v1beta1",
"kind": "Cluster",
"metadata": map[string]interface{}{
"name": "cluster-1",
"namespace": "ns-1",
},
"spec": map[string]interface{}{
"paused": true,
},
},
},
"cluster-2": &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "cluster.x-k8s.io/v1beta1",
"kind": "Cluster",
"metadata": map[string]interface{}{
"name": "cluster-2",
"namespace": "ns-1",
},
"spec": map[string]interface{}{
"controlPlaneEndpoint": map[string]interface{}{
"host": "1.1.1.1",
"port": float64(8080),
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := clientutil.YamlToClientObjects(tt.yaml)
g.Expect(err).To(BeNil(), "YamlToClientObjects() returned an error")
g.Expect(len(got)).To(Equal(len(tt.want)), "Should have got %d objects", len(tt.want))
for _, obj := range got {
g.Expect(equality.Semantic.DeepDerivative(obj, tt.want[obj.GetName()])).To(BeTrue(), "Returned object %s is not equal to expected object", obj.GetName())
}
})
}
}
| 84 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"time"
"github.com/go-logr/logr"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/controller"
)
// CheckControlPlaneReady is a controller helper to check whether the CAPI control plane for
// an eks-a cluster is ready or not. It is intended to be used from cluster reconcilers
// due to its signature and the fact that it returns controller results with appropriate wait times whenever
// the cluster is not ready.
func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
capiCluster, err := controller.GetCAPICluster(ctx, client, cluster)
if err != nil {
return controller.Result{}, err
}
if capiCluster == nil {
log.Info("CAPI cluster does not exist yet, requeuing")
return controller.ResultWithRequeue(5 * time.Second), nil
}
if !conditions.IsTrue(capiCluster, clusterapi.ControlPlaneReadyCondition) {
log.Info("CAPI control plane is not ready yet, requeuing")
// TODO: eventually this can be implemented with controller watches
return controller.ResultWithRequeue(30 * time.Second), nil
}
log.Info("CAPI control plane is ready")
return controller.Result{}, nil
}
| 40 |
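CheckControlPlaneReady takes an extra client argument, so it does not match the Phase signature directly; a sketch of adapting it with a closure (the controlPlaneReadyPhase wrapper is an assumption, not repo code):
package example
import (
    "context"

    "github.com/go-logr/logr"
    "sigs.k8s.io/controller-runtime/pkg/client"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/controller"
    "github.com/aws/eks-anywhere/pkg/controller/clusters"
)
// controlPlaneReadyPhase adapts CheckControlPlaneReady to the
// Phase[*anywherev1.Cluster] shape by closing over the client.
func controlPlaneReadyPhase(c client.Client) controller.Phase[*anywherev1.Cluster] {
    return func(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
        // Returns a requeue Result while the CAPI cluster is missing or its
        // control plane is not yet ready.
        return clusters.CheckControlPlaneReady(ctx, c, log, cluster)
    }
}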
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
)
func TestCheckControlPlaneReadyItIsReady(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
capiCluster := capiCluster(func(c *clusterv1.Cluster) {
c.Status.Conditions = clusterv1.Conditions{
{
Type: clusterapi.ControlPlaneReadyCondition,
Status: corev1.ConditionTrue,
},
}
})
client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(controller.Result{}))
}
func TestCheckControlPlaneReadyNoCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()
result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(
controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
)
}
func TestCheckControlPlaneReadyNotReady(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
capiCluster := capiCluster()
client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(
controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
)
}
func TestCheckControlPlaneReadyErrorReading(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
// This should make the client fail because CRDs are not registered
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
_, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}
func eksaCluster() *anywherev1.Cluster {
return &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
},
}
}
type capiClusterOpt func(*clusterv1.Cluster)
func capiCluster(opts ...capiClusterOpt) *clusterv1.Cluster {
c := &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: clusterv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
},
}
for _, opt := range opts {
opt(c)
}
return c
}
| 117 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"reflect"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/controller/serverside"
)
// ControlPlane represents a CAPI spec for a kubernetes cluster.
type ControlPlane struct {
Cluster *clusterv1.Cluster
// ProviderCluster is the provider-specific resource that holds the details
// for provisioning the infrastructure, referenced in Cluster.Spec.InfrastructureRef
ProviderCluster client.Object
KubeadmControlPlane *controlplanev1.KubeadmControlPlane
// ControlPlaneMachineTemplate is the provider-specific machine template referenced
// in KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef
ControlPlaneMachineTemplate client.Object
EtcdCluster *etcdv1.EtcdadmCluster
// EtcdMachineTemplate is the provider-specific machine template referenced
// in EtcdCluster.Spec.InfrastructureTemplate
EtcdMachineTemplate client.Object
// Other includes any other provider-specific objects that need to be reconciled
// as part of the control plane.
Other []client.Object
}
// AllObjects returns all the control plane objects.
func (cp *ControlPlane) AllObjects() []client.Object {
objs := make([]client.Object, 0, 6+len(cp.Other))
objs = append(objs, cp.Cluster, cp.ProviderCluster, cp.KubeadmControlPlane)
if !reflect.ValueOf(cp.ControlPlaneMachineTemplate).IsNil() {
objs = append(objs, cp.ControlPlaneMachineTemplate)
}
if cp.EtcdCluster != nil {
objs = append(objs, cp.EtcdCluster, cp.EtcdMachineTemplate)
}
objs = append(objs, cp.Other...)
return objs
}
// ReconcileControlPlane orchestrates the ControlPlane reconciliation logic.
func ReconcileControlPlane(ctx context.Context, c client.Client, cp *ControlPlane) (controller.Result, error) {
if cp.EtcdCluster == nil {
// For stacked etcd, we don't need orchestration, apply directly
return controller.Result{}, applyAllControlPlaneObjects(ctx, c, cp)
}
cluster := &clusterv1.Cluster{}
err := c.Get(ctx, client.ObjectKeyFromObject(cp.Cluster), cluster)
if apierrors.IsNotFound(err) {
// If the CAPI cluster doesn't exist, this is a new cluster, create all objects
return controller.Result{}, applyAllControlPlaneObjects(ctx, c, cp)
}
if err != nil {
return controller.Result{}, errors.Wrap(err, "reading CAPI cluster")
}
externalEtcdNamespace := cluster.Spec.ManagedExternalEtcdRef.Namespace
// This can happen when a user has a workload cluster that is older than the following PR, causing cluster
// reconciliation to fail. By inferring the namespace from the clusterv1.Cluster object, we are able to retrieve the object correctly.
// PR: https://github.com/aws/eks-anywhere/pull/4025
// TODO: See if it is possible to propagate the namespace field in the clusterv1.Cluster object in cluster-api like the other refs.
if externalEtcdNamespace == "" {
externalEtcdNamespace = cluster.Namespace
}
etcdadmCluster := &etcdv1.EtcdadmCluster{}
key := client.ObjectKey{
Name: cluster.Spec.ManagedExternalEtcdRef.Name,
Namespace: externalEtcdNamespace,
}
if err = c.Get(ctx, key, etcdadmCluster); err != nil {
return controller.Result{}, errors.Wrap(err, "reading etcdadm cluster")
}
if !equality.Semantic.DeepDerivative(cp.EtcdCluster.Spec, etcdadmCluster.Spec) {
// If the etcdadm cluster has changes, this will require a rolling upgrade
// Mark the etcdadm cluster as upgrading and pause the kcp reconciliation
// The CAPI cluster and etcdadm cluster controller will take care of removing
// these annotations at the right time to orchestrate the kcp upgrade
clientutil.AddAnnotation(cp.EtcdCluster, etcdv1.UpgradeInProgressAnnotation, "true")
clientutil.AddAnnotation(cp.KubeadmControlPlane, clusterv1.PausedAnnotation, "true")
}
return controller.Result{}, applyAllControlPlaneObjects(ctx, c, cp)
}
func applyAllControlPlaneObjects(ctx context.Context, c client.Client, cp *ControlPlane) error {
if err := serverside.ReconcileObjects(ctx, c, cp.AllObjects()); err != nil {
return errors.Wrap(err, "applying control plane objects")
}
return nil
}
| 114 |
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"testing"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/internal/test/envtest"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestReconcileControlPlaneStackedEtcd(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
cp := controlPlaneStackedEtcd(ns)
g.Expect(clusters.ReconcileControlPlane(ctx, c, cp)).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, cp.Cluster)
api.ShouldEventuallyExist(ctx, cp.KubeadmControlPlane)
api.ShouldEventuallyExist(ctx, cp.ControlPlaneMachineTemplate)
api.ShouldEventuallyExist(ctx, cp.ProviderCluster)
}
func TestReconcileControlPlaneExternalEtcdNewCluster(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
cp := controlPlaneExternalEtcd(ns)
g.Expect(clusters.ReconcileControlPlane(ctx, c, cp)).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, cp.Cluster)
api.ShouldEventuallyExist(ctx, cp.KubeadmControlPlane)
api.ShouldEventuallyExist(ctx, cp.ControlPlaneMachineTemplate)
api.ShouldEventuallyExist(ctx, cp.ProviderCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdMachineTemplate)
}
func TestReconcileControlPlaneExternalEtcdUpgradeWithDiff(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
cp := controlPlaneExternalEtcd(ns)
envtest.CreateObjs(ctx, t, c, cp.AllObjects()...)
cp.EtcdCluster.Spec.Replicas = ptr.Int32(5)
g.Expect(clusters.ReconcileControlPlane(ctx, c, cp)).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, cp.Cluster)
api.ShouldEventuallyExist(ctx, cp.KubeadmControlPlane)
api.ShouldEventuallyExist(ctx, cp.ControlPlaneMachineTemplate)
api.ShouldEventuallyExist(ctx, cp.ProviderCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdMachineTemplate)
etcdadmCluster := &etcdv1.EtcdadmCluster{ObjectMeta: cp.EtcdCluster.ObjectMeta}
api.ShouldEventuallyMatch(
ctx,
etcdadmCluster,
func(g Gomega) {
g.Expect(etcdadmCluster.Spec.Replicas).To(HaveValue(BeEquivalentTo(5)), "etcdadm replicas should have been updated")
g.Expect(etcdadmCluster.Annotations).To(
HaveKeyWithValue(etcdv1.UpgradeInProgressAnnotation, "true"),
"etcdadm upgrading annotation should have been added",
)
},
)
kcp := &controlplanev1.KubeadmControlPlane{ObjectMeta: cp.KubeadmControlPlane.ObjectMeta}
api.ShouldEventuallyMatch(
ctx,
kcp,
func(g Gomega) {
g.Expect(kcp.Annotations).To(
HaveKeyWithValue(clusterv1.PausedAnnotation, "true"),
"kcp paused annotation should have been added",
)
},
)
}
func TestReconcileControlPlaneExternalEtcdUpgradeWithNoDiff(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
cp := controlPlaneExternalEtcd(ns)
envtest.CreateObjs(ctx, t, c, cp.AllObjects()...)
g.Expect(clusters.ReconcileControlPlane(ctx, c, cp)).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, cp.Cluster)
api.ShouldEventuallyExist(ctx, cp.KubeadmControlPlane)
api.ShouldEventuallyExist(ctx, cp.ControlPlaneMachineTemplate)
api.ShouldEventuallyExist(ctx, cp.ProviderCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdMachineTemplate)
}
func TestReconcileControlPlaneExternalEtcdUpgradeWithNoNamespace(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
cp := controlPlaneExternalEtcd(ns)
cp.Cluster.Spec.ManagedExternalEtcdRef.Namespace = ""
envtest.CreateObjs(ctx, t, c, cp.AllObjects()...)
g.Expect(clusters.ReconcileControlPlane(ctx, c, cp)).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, cp.Cluster)
api.ShouldEventuallyExist(ctx, cp.KubeadmControlPlane)
api.ShouldEventuallyExist(ctx, cp.ControlPlaneMachineTemplate)
api.ShouldEventuallyExist(ctx, cp.ProviderCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdCluster)
api.ShouldEventuallyExist(ctx, cp.EtcdMachineTemplate)
}
func controlPlaneStackedEtcd(namespace string) *clusters.ControlPlane {
return &clusters.ControlPlane{
Cluster: &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "Cluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: namespace,
},
},
KubeadmControlPlane: &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmControlPlane",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: namespace,
},
},
ProviderCluster: &dockerv1.DockerCluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "DockerCluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: namespace,
},
},
ControlPlaneMachineTemplate: &dockerv1.DockerMachineTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "DockerMachineTemplate",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "my-cluster-cp",
},
},
}
}
func controlPlaneExternalEtcd(namespace string) *clusters.ControlPlane {
cp := controlPlaneStackedEtcd(namespace)
cp.EtcdCluster = &etcdv1.EtcdadmCluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1",
Kind: "EtcdadmCluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: namespace,
},
}
cp.Cluster.Spec.ManagedExternalEtcdRef = &corev1.ObjectReference{
Name: cp.EtcdCluster.Name,
Namespace: cp.EtcdCluster.Namespace,
}
cp.EtcdMachineTemplate = &dockerv1.DockerMachineTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "DockerMachineTemplate",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: "my-cluster-etcd",
},
}
return cp
}
func TestControlPlaneAllObjects(t *testing.T) {
stackedCP := controlPlaneStackedEtcd("my-ns")
withOtherCP := controlPlaneStackedEtcd("my-ns")
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: "s",
Namespace: "eksa-system",
},
}
cm := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cm",
Namespace: "eksa-system",
},
}
withOtherCP.Other = append(withOtherCP.Other, secret, cm)
externalCP := controlPlaneExternalEtcd("my-ns")
tests := []struct {
name string
cp *clusters.ControlPlane
want []client.Object
}{
{
name: "stacked etcd",
cp: stackedCP,
want: []client.Object{
stackedCP.Cluster,
stackedCP.KubeadmControlPlane,
stackedCP.ProviderCluster,
stackedCP.ControlPlaneMachineTemplate,
},
},
{
name: "external etcd",
cp: externalCP,
want: []client.Object{
externalCP.Cluster,
externalCP.KubeadmControlPlane,
externalCP.ProviderCluster,
externalCP.ControlPlaneMachineTemplate,
externalCP.EtcdCluster,
externalCP.EtcdMachineTemplate,
},
},
{
name: "stacked etcd with other",
cp: withOtherCP,
want: []client.Object{
stackedCP.Cluster,
stackedCP.KubeadmControlPlane,
stackedCP.ProviderCluster,
stackedCP.ControlPlaneMachineTemplate,
secret,
cm,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.cp.AllObjects()).To(ConsistOf(tt.want))
})
}
}
| 283 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
// IPUniquenessValidator defines an interface for the methods to validate the control plane IP.
type IPUniquenessValidator interface {
ValidateControlPlaneIPUniqueness(cluster *anywherev1.Cluster) error
}
// IPValidator validates control plane IP.
type IPValidator struct {
ipUniquenessValidator IPUniquenessValidator
client client.Client
}
// NewIPValidator returns a new IPValidator.
func NewIPValidator(ipUniquenessValidator IPUniquenessValidator, client client.Client) *IPValidator {
return &IPValidator{
ipUniquenessValidator: ipUniquenessValidator,
client: client,
}
}
// ValidateControlPlaneIP only validates IP on cluster creation.
func (i *IPValidator) ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
capiCluster, err := controller.GetCAPICluster(ctx, i.client, spec.Cluster)
if err != nil {
return controller.Result{}, errors.Wrap(err, "validating control plane IP")
}
if capiCluster != nil {
// If CAPI cluster exists, the control plane IP has already been validated,
// and it's possibly already in use so no need to validate it again
log.Info("CAPI cluster already exists, skipping control plane IP validation")
return controller.Result{}, nil
}
if err := i.ipUniquenessValidator.ValidateControlPlaneIPUniqueness(spec.Cluster); err != nil {
spec.Cluster.Status.FailureMessage = ptr.String(err.Error())
log.Error(err, "Unavailable control plane IP")
return controller.ResultWithReturn(), nil
}
return controller.Result{}, nil
}
| 54 |
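A sketch of invoking the validator from a reconciler; the noopUniqueness stub and validateIP wrapper are assumptions for illustration (the real uniqueness validator lives elsewhere in the repo):
package example
import (
    "context"

    "github.com/go-logr/logr"
    "sigs.k8s.io/controller-runtime/pkg/client"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/controller"
    "github.com/aws/eks-anywhere/pkg/controller/clusters"
)
// noopUniqueness is a hypothetical IPUniquenessValidator used only for illustration.
type noopUniqueness struct{}

func (noopUniqueness) ValidateControlPlaneIPUniqueness(*anywherev1.Cluster) error { return nil }

func validateIP(ctx context.Context, log logr.Logger, c client.Client, spec *cluster.Spec) (controller.Result, error) {
    v := clusters.NewIPValidator(noopUniqueness{}, c)
    // On creation (no CAPI cluster yet) this checks the control plane IP; once
    // the CAPI cluster exists it skips the check and returns immediately.
    return v.ValidateControlPlaneIP(ctx, log, spec)
}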
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"errors"
"testing"
"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/controller/clusters/mocks"
)
func TestValidateControlPlaneIPSuccess(t *testing.T) {
tt := newIPValidatorTest(t)
client := fake.NewClientBuilder().Build()
ipUniquenessValidator := mocks.NewMockIPUniquenessValidator(gomock.NewController(t))
ipUniquenessValidator.EXPECT().ValidateControlPlaneIPUniqueness(tt.testCluster).Return(nil)
ipValidator := clusters.NewIPValidator(ipUniquenessValidator, client)
tt.Expect(ipValidator.ValidateControlPlaneIP(context.Background(), tt.logger, tt.spec)).To(Equal(controller.Result{}))
tt.Expect(tt.spec.Cluster.Status.FailureMessage).To(BeNil())
}
func TestValidateControlPlaneIPUnavailable(t *testing.T) {
tt := newIPValidatorTest(t)
client := fake.NewClientBuilder().Build()
ipUniquenessValidator := mocks.NewMockIPUniquenessValidator(gomock.NewController(t))
ipUniquenessValidator.EXPECT().ValidateControlPlaneIPUniqueness(tt.testCluster).Return(errors.New("already in use"))
ipValidator := clusters.NewIPValidator(ipUniquenessValidator, client)
tt.Expect(ipValidator.ValidateControlPlaneIP(context.Background(), tt.logger, tt.spec)).To(Equal(controller.ResultWithReturn()))
tt.Expect(tt.spec.Cluster.Status.FailureMessage).To(HaveValue(ContainSubstring("already in use")))
}
func TestValidateControlPlaneIPCapiClusterExists(t *testing.T) {
tt := newIPValidatorTest(t)
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = "test-cluster"
})
client := fake.NewClientBuilder().WithObjects(capiCluster).Build()
ipUniquenessValidator := mocks.NewMockIPUniquenessValidator(gomock.NewController(t))
ipValidator := clusters.NewIPValidator(ipUniquenessValidator, client)
tt.Expect(ipValidator.ValidateControlPlaneIP(context.Background(), tt.logger, tt.spec)).To(Equal(controller.Result{}))
tt.Expect(tt.spec.Cluster.Status.FailureMessage).To(BeNil())
}
type ipValidatorTest struct {
t testing.TB
*WithT
logger logr.Logger
spec *cluster.Spec
testCluster *anywherev1.Cluster
}
func newIPValidatorTest(t testing.TB) *ipValidatorTest {
logger := test.NewNullLogger()
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: constants.EksaSystemNamespace,
},
Spec: anywherev1.ClusterSpec{
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Endpoint: &anywherev1.Endpoint{
Host: "test-ip",
},
},
},
}
})
testCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: "test-cluster",
},
Spec: anywherev1.ClusterSpec{
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Endpoint: &anywherev1.Endpoint{
Host: "test-ip",
},
},
},
}
tt := &ipValidatorTest{
t: t,
WithT: NewWithT(t),
logger: logger,
spec: spec,
testCluster: testCluster,
}
return tt
}
| 104 |
eks-anywhere | aws | Go | package clusters_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 15 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"github.com/go-logr/logr"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/controller"
)
// ProviderClusterReconciler reconciles a provider specific eks-a cluster.
type ProviderClusterReconciler interface {
// Reconcile handles the full cluster reconciliation.
Reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error)
	// ReconcileWorkerNodes handles only the worker node reconciliation. Intended to be used on self-managed clusters.
ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error)
}
// ProviderClusterReconcilerRegistry holds a collection of cluster provider reconcilers
// and ties them to different provider Datacenter kinds.
type ProviderClusterReconcilerRegistry struct {
reconcilers map[string]ProviderClusterReconciler
}
func newClusterReconcilerRegistry() ProviderClusterReconcilerRegistry {
return ProviderClusterReconcilerRegistry{
reconcilers: map[string]ProviderClusterReconciler{},
}
}
func (r *ProviderClusterReconcilerRegistry) add(datacenterKind string, reconciler ProviderClusterReconciler) {
r.reconcilers[datacenterKind] = reconciler
}
// Get returns ProviderClusterReconciler for a particular Datacenter kind.
func (r *ProviderClusterReconcilerRegistry) Get(datacenterKind string) ProviderClusterReconciler {
return r.reconcilers[datacenterKind]
}
// ProviderClusterReconcilerRegistryBuilder builds ProviderClusterReconcilerRegistry's.
type ProviderClusterReconcilerRegistryBuilder struct {
reconciler ProviderClusterReconcilerRegistry
}
// NewProviderClusterReconcilerRegistryBuilder returns a new empty ProviderClusterReconcilerRegistryBuilder.
func NewProviderClusterReconcilerRegistryBuilder() *ProviderClusterReconcilerRegistryBuilder {
return &ProviderClusterReconcilerRegistryBuilder{
reconciler: newClusterReconcilerRegistry(),
}
}
// Add accumulates a datacenter kind and reconciler pair to be included in the final registry.
func (b *ProviderClusterReconcilerRegistryBuilder) Add(datacenterKind string, reconciler ProviderClusterReconciler) *ProviderClusterReconcilerRegistryBuilder {
b.reconciler.add(datacenterKind, reconciler)
return b
}
// Build returns a registry with all the previously added reconcilers.
func (b *ProviderClusterReconcilerRegistryBuilder) Build() ProviderClusterReconcilerRegistry {
r := b.reconciler
b.reconciler = newClusterReconcilerRegistry()
return r
}
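// Editorial example (not from the original source): building a registry for two providers
// and looking up a reconciler by the cluster's Datacenter kind. The reconciler variables
// and kind strings are illustrative assumptions.
//
//	registry := clusters.NewProviderClusterReconcilerRegistryBuilder().
//		Add("VSphereDatacenterConfig", vsphereReconciler).
//		Add("DockerDatacenterConfig", dockerReconciler).
//		Build()
//
//	reconciler := registry.Get(cluster.Spec.DatacenterRef.Kind)
//	if reconciler == nil {
//		// no reconciler registered for this datacenter kind
//	}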
| 65 |
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"testing"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
)
func TestProviderClusterReconcilerRegistryGet(t *testing.T) {
dummy1 := dummyProviderReconciler{name: "dummy1"}
dummy2 := dummyProviderReconciler{name: "dummy2"}
registry := clusters.NewProviderClusterReconcilerRegistryBuilder().
Add("dummy1", dummy1).
Add("dummy2", dummy2).
Build()
tests := []struct {
name string
datacenterKind string
want clusters.ProviderClusterReconciler
}{
{
name: "reconciler exists",
datacenterKind: "dummy1",
want: dummy1,
},
{
name: "reconciler does not exist",
datacenterKind: "dummy3",
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
if tt.want != nil {
g.Expect(registry.Get(tt.datacenterKind)).To(Equal(tt.want))
} else {
g.Expect(registry.Get(tt.datacenterKind)).To(BeNil())
}
})
}
}
type dummyProviderReconciler struct {
name string
}
func (dummyProviderReconciler) Reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
return controller.Result{}, nil
}
func (dummyProviderReconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
return controller.Result{}, nil
}
func (dummyProviderReconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
return controller.Result{}, nil
}
| 67 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/controller"
)
// UpdateClusterStatusForControlPlane checks the current state of the Cluster's control plane and updates the
// Cluster status information.
func UpdateClusterStatusForControlPlane(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) error {
kcp, err := controller.GetKubeadmControlPlane(ctx, client, cluster)
if err != nil {
return errors.Wrapf(err, "getting kubeadmcontrolplane")
}
updateControlPlaneInitializedCondition(cluster, kcp)
updateControlPlaneReadyCondition(cluster, kcp)
return nil
}
// UpdateClusterStatusForWorkers checks the current state of the Cluster's workers and updates the
// Cluster status information.
func UpdateClusterStatusForWorkers(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) error {
machineDeployments, err := controller.GetMachineDeployments(ctx, client, cluster)
if err != nil {
return errors.Wrap(err, "getting machine deployments")
}
updateWorkersReadyCondition(cluster, machineDeployments)
return nil
}
// UpdateClusterStatusForCNI updates the Cluster status for the default CNI before the control plane is ready. The CNI reconciler
// handles the rest of the logic for determining the condition and updating the status based on the current state of the cluster.
func UpdateClusterStatusForCNI(ctx context.Context, cluster *anywherev1.Cluster) {
if !conditions.IsTrue(cluster, anywherev1.ControlPlaneReadyCondition) {
conditions.MarkFalse(cluster, anywherev1.DefaultCNIConfiguredCondition, anywherev1.ControlPlaneNotReadyReason, clusterv1.ConditionSeverityInfo, "")
return
}
// Self managed clusters do not use the CNI reconciler, so this status would never get resolved.
// TODO: Remove after self-managed clusters are created with the controller in the CLI
if cluster.IsSelfManaged() {
ciliumCfg := cluster.Spec.ClusterNetwork.CNIConfig.Cilium
// Though it may be installed initially to successfully create the cluster,
// if the CNI is configured to skip upgrades, we mark the condition as "False"
if !ciliumCfg.IsManaged() {
conditions.MarkFalse(cluster, anywherev1.DefaultCNIConfiguredCondition, anywherev1.SkipUpgradesForDefaultCNIConfiguredReason, clusterv1.ConditionSeverityWarning, "Configured to skip default Cilium CNI upgrades")
return
}
// Otherwise, since the control plane is fully ready we can assume the CNI has been configured.
conditions.MarkTrue(cluster, anywherev1.DefaultCNIConfiguredCondition)
}
}
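// Editorial example (not from the original source): a reconciler holding a client and the
// EKS-A cluster could refresh the status by running the updaters in order.
//
//	if err := UpdateClusterStatusForControlPlane(ctx, c, cluster); err != nil {
//		return err
//	}
//	if err := UpdateClusterStatusForWorkers(ctx, c, cluster); err != nil {
//		return err
//	}
//	UpdateClusterStatusForCNI(ctx, cluster)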
// updateControlPlaneReadyCondition updates the ControlPlaneReady condition, after checking the state of the control plane
// in the cluster.
func updateControlPlaneReadyCondition(cluster *anywherev1.Cluster, kcp *controlplanev1.KubeadmControlPlane) {
initializedCondition := conditions.Get(cluster, anywherev1.ControlPlaneInitializedCondition)
if initializedCondition.Status != "True" {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneReadyCondition, initializedCondition.Reason, initializedCondition.Severity, initializedCondition.Message)
return
}
// We make sure to check that the status is up to date before using it
if kcp.Status.ObservedGeneration != kcp.ObjectMeta.Generation {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneReadyCondition, anywherev1.OutdatedInformationReason, clusterv1.ConditionSeverityInfo, "")
return
}
	// The control plane should be marked ready when the number of ready nodes in the cluster
	// equals the count specified in the spec and they are all running the specified version.
expected := cluster.Spec.ControlPlaneConfiguration.Count
totalReplicas := int(kcp.Status.Replicas)
	// First, in the case of a rolling upgrade, we get the number of outdated nodes, and as long as there are some,
	// we reflect in the message that the Cluster is in the process of replacing the old nodes with
	// the new machine spec.
updatedReplicas := int(kcp.Status.UpdatedReplicas)
totalOutdated := totalReplicas - updatedReplicas
if totalOutdated > 0 {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneReadyCondition, anywherev1.RollingUpgradeInProgress, clusterv1.ConditionSeverityInfo, "Control plane nodes not up-to-date yet, %d rolling (%d up to date)", totalReplicas, updatedReplicas)
return
}
	// Then, we check that the number of nodes in the cluster matches the expected amount. If not, we
	// mark that the Cluster is scaling the control plane replicas up or down to the expected amount.
if totalReplicas < expected {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneReadyCondition, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up control plane nodes, %d expected (%d actual)", expected, totalReplicas)
return
}
if totalReplicas > expected {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneReadyCondition, anywherev1.ScalingDownReason, clusterv1.ConditionSeverityInfo, "Scaling down control plane nodes, %d expected (%d actual)", expected, totalReplicas)
return
}
readyReplicas := int(kcp.Status.ReadyReplicas)
if readyReplicas != expected {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneReadyCondition, anywherev1.NodesNotReadyReason, clusterv1.ConditionSeverityInfo, "Control plane nodes not ready yet, %d expected (%d ready)", expected, readyReplicas)
return
}
conditions.MarkTrue(cluster, anywherev1.ControlPlaneReadyCondition)
}
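// Worked example (editorial, not from the original source): with a spec count of 3,
// kcp.Status.Replicas = 3, UpdatedReplicas = 1 and ReadyReplicas = 3, totalOutdated
// is 3 - 1 = 2, so the condition is marked "False" with RollingUpgradeInProgress
// before any of the scaling or readiness checks are evaluated.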
// updateControlPlaneInitializedCondition updates the ControlPlaneInitialized condition if it hasn't already been set.
// This condition should be set only once.
func updateControlPlaneInitializedCondition(cluster *anywherev1.Cluster, kcp *controlplanev1.KubeadmControlPlane) {
// Return early if the ControlPlaneInitializedCondition is already "True"
if conditions.IsTrue(cluster, anywherev1.ControlPlaneInitializedCondition) {
return
}
if kcp == nil {
conditions.Set(cluster, controlPlaneInitializationInProgressCondition())
return
}
// We make sure to check that the status is up to date before using it
if kcp.Status.ObservedGeneration != kcp.ObjectMeta.Generation {
conditions.MarkFalse(cluster, anywherev1.ControlPlaneInitializedCondition, anywherev1.OutdatedInformationReason, clusterv1.ConditionSeverityInfo, "")
return
}
	// Then, we check explicitly that the control plane is available. This way, we do not rely on CAPI
	// to implicitly fill out our condition reasons, and we can have custom messages.
available := conditions.IsTrue(kcp, controlplanev1.AvailableCondition)
if !available {
conditions.Set(cluster, controlPlaneInitializationInProgressCondition())
return
}
conditions.MarkTrue(cluster, anywherev1.ControlPlaneInitializedCondition)
}
// updateWorkersReadyCondition updates the WorkersReadyConditon condition after checking the state of the worker node groups
// in the cluster.
func updateWorkersReadyCondition(cluster *anywherev1.Cluster, machineDeployments []clusterv1.MachineDeployment) {
initializedCondition := conditions.Get(cluster, anywherev1.ControlPlaneInitializedCondition)
if initializedCondition.Status != "True" {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyConditon, anywherev1.ControlPlaneNotInitializedReason, clusterv1.ConditionSeverityInfo, "")
return
}
totalExpected := 0
for _, wng := range cluster.Spec.WorkerNodeGroupConfigurations {
totalExpected += *wng.Count
}
	// First, we need to aggregate the number of nodes across worker node groups to be able to assess the condition of the workers
// as a whole.
totalReadyReplicas := 0
totalUpdatedReplicas := 0
totalReplicas := 0
for _, md := range machineDeployments {
// We make sure to check that the status is up to date before using the information from the machine deployment status.
if md.Status.ObservedGeneration != md.ObjectMeta.Generation {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyConditon, anywherev1.OutdatedInformationReason, clusterv1.ConditionSeverityInfo, "Worker node group %s status not up to date yet", md.Name)
return
}
totalReadyReplicas += int(md.Status.ReadyReplicas)
totalUpdatedReplicas += int(md.Status.UpdatedReplicas)
totalReplicas += int(md.Status.Replicas)
}
// There may be worker nodes that are not up to date yet in the case of a rolling upgrade,
	// so reflect that on the condition with an appropriate message.
totalOutdated := totalReplicas - totalUpdatedReplicas
if totalOutdated > 0 {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyConditon, anywherev1.RollingUpgradeInProgress, clusterv1.ConditionSeverityInfo, "Worker nodes not up-to-date yet, %d rolling (%d up to date)", totalReplicas, totalUpdatedReplicas)
return
}
	// If the number of worker node replicas needs to be scaled up.
if totalReplicas < totalExpected {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyConditon, anywherev1.ScalingUpReason, clusterv1.ConditionSeverityInfo, "Scaling up worker nodes, %d expected (%d actual)", totalExpected, totalReplicas)
return
}
	// If the number of worker node replicas needs to be scaled down.
if totalReplicas > totalExpected {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyConditon, anywherev1.ScalingDownReason, clusterv1.ConditionSeverityInfo, "Scaling down worker nodes, %d expected (%d actual)", totalExpected, totalReplicas)
return
}
if totalReadyReplicas != totalExpected {
conditions.MarkFalse(cluster, anywherev1.WorkersReadyConditon, anywherev1.NodesNotReadyReason, clusterv1.ConditionSeverityInfo, "Worker nodes not ready yet, %d expected (%d ready)", totalExpected, totalReadyReplicas)
return
}
conditions.MarkTrue(cluster, anywherev1.WorkersReadyConditon)
}
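// Worked example (editorial, not from the original source): two worker node groups with
// counts 1 and 2 give totalExpected = 3. If the machine deployments report Replicas of
// 1 and 2 and UpdatedReplicas of 1 and 2, but ReadyReplicas of 1 and 0, then
// totalOutdated = 3 - 3 = 0 and totalReplicas equals totalExpected, so the only failing
// check is totalReadyReplicas (1) != totalExpected (3) and the condition is marked
// "False" with NodesNotReadyReason.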
// controlPlaneInitializationInProgressCondition returns a new "False" condition for the ControlPlaneInitializationInProgress reason.
func controlPlaneInitializationInProgressCondition() *anywherev1.Condition {
return conditions.FalseCondition(anywherev1.ControlPlaneInitializedCondition, anywherev1.ControlPlaneInitializationInProgressReason, clusterv1.ConditionSeverityInfo, "The first control plane instance is not available yet")
}
| 213 |
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
const controlPlaneInitalizationInProgressReason = "The first control plane instance is not available yet"
func TestUpdateClusterStatusForControlPlane(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
kcp *controlplanev1.KubeadmControlPlane
controlPlaneCount int
conditions []anywherev1.Condition
wantCondition *anywherev1.Condition
}{
{
name: "kcp is nil",
kcp: nil,
conditions: []anywherev1.Condition{},
wantCondition: &anywherev1.Condition{
Type: "ControlPlaneInitialized",
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
Reason: anywherev1.ControlPlaneInitializationInProgressReason,
Message: controlPlaneInitalizationInProgressReason,
},
},
{
name: "control plane already initialized",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Conditions = clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: "True",
},
}
}),
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
{
name: "kcp status outdated, generations do not match",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.ObjectMeta.Generation = 1
kcp.Status.ObservedGeneration = 0
}),
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
Reason: anywherev1.OutdatedInformationReason,
Message: "",
},
},
{
			name: "kcp not available yet",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Conditions = clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: "False",
},
}
}),
conditions: []anywherev1.Condition{},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
Reason: anywherev1.ControlPlaneInitializationInProgressReason,
Message: controlPlaneInitalizationInProgressReason,
},
},
{
name: "kcp available",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Conditions = clusterv1.Conditions{
{
Type: controlplanev1.AvailableCondition,
Status: "True",
},
}
}),
conditions: []anywherev1.Condition{},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
{
name: "control plane not initialized",
kcp: &controlplanev1.KubeadmControlPlane{},
controlPlaneCount: 1,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
Reason: anywherev1.ControlPlaneInitializationInProgressReason,
Message: controlPlaneInitalizationInProgressReason,
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
Reason: anywherev1.ControlPlaneInitializationInProgressReason,
Message: controlPlaneInitalizationInProgressReason,
},
},
{
name: "kubeadmcontrolplane status out of date",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Generation = 1
kcp.Status.ObservedGeneration = 2
}),
controlPlaneCount: 1,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
Reason: anywherev1.OutdatedInformationReason,
Severity: clusterv1.ConditionSeverityInfo,
},
},
{
name: "scaling up control plane nodes",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Replicas = 1
kcp.Status.UpdatedReplicas = 1
kcp.Status.Conditions = []clusterv1.Condition{
{
Type: clusterv1.ReadyCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
},
}
}),
controlPlaneCount: 3,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
Reason: anywherev1.ScalingUpReason,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Scaling up control plane nodes, 3 expected (1 actual)",
},
},
{
name: "scaling down control plane nodes",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Replicas = 3
kcp.Status.UpdatedReplicas = 3
kcp.Status.Conditions = []clusterv1.Condition{
{
Type: clusterv1.ReadyCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
},
}
}),
controlPlaneCount: 1,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
Reason: anywherev1.ScalingDownReason,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Scaling down control plane nodes",
},
},
{
name: "control plane replicas out of date",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.ReadyReplicas = 3
kcp.Status.Replicas = 3
kcp.Status.UpdatedReplicas = 1
kcp.Status.Conditions = []clusterv1.Condition{
{
Type: clusterv1.ReadyCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
},
}
}),
controlPlaneCount: 3,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
Reason: anywherev1.RollingUpgradeInProgress,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Control plane nodes not up-to-date yet",
},
},
{
name: "control plane nodes not ready yet",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Replicas = 3
kcp.Status.ReadyReplicas = 2
kcp.Status.UpdatedReplicas = 3
kcp.Status.Conditions = []clusterv1.Condition{
{
Type: clusterv1.ReadyCondition,
Status: "True",
},
}
}),
controlPlaneCount: 3,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
Reason: anywherev1.NodesNotReadyReason,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Control plane nodes not ready yet",
},
},
{
name: "control plane ready",
kcp: test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Status.Replicas = 3
kcp.Status.ReadyReplicas = 3
kcp.Status.UpdatedReplicas = 3
kcp.Status.Conditions = []clusterv1.Condition{
{
Type: clusterv1.ReadyCondition,
Status: "True",
},
}
}),
controlPlaneCount: 3,
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "True",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
cluster := test.NewClusterSpec().Cluster
cluster.Name = "test-cluster"
cluster.Namespace = constants.EksaSystemNamespace
cluster.Spec.ControlPlaneConfiguration.Count = tt.controlPlaneCount
cluster.Status.Conditions = tt.conditions
objs := []runtime.Object{}
var client client.Client
if tt.kcp != nil {
tt.kcp.Name = cluster.Name
tt.kcp.Namespace = cluster.Namespace
objs = append(objs, tt.kcp)
}
client = fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
err := clusters.UpdateClusterStatusForControlPlane(ctx, client, cluster)
g.Expect(err).To(BeNil())
condition := conditions.Get(cluster, tt.wantCondition.Type)
g.Expect(condition).ToNot(BeNil())
g.Expect(condition.Type).To(Equal(tt.wantCondition.Type))
g.Expect(condition.Severity).To(Equal(tt.wantCondition.Severity))
g.Expect(condition.Status).To(Equal(tt.wantCondition.Status))
g.Expect(condition.Reason).To(Equal(tt.wantCondition.Reason))
g.Expect(condition.Message).To(ContainSubstring(tt.wantCondition.Message))
})
}
}
func TestUpdateClusterStatusForWorkers(t *testing.T) {
cluster := test.NewClusterSpec().Cluster
clusterName := "test-cluster"
g := NewWithT(t)
tests := []struct {
name string
machineDeployments []clusterv1.MachineDeployment
workerNodeGroupConfigurations []anywherev1.WorkerNodeGroupConfiguration
conditions []anywherev1.Condition
wantCondition *anywherev1.Condition
wantErr string
}{
{
name: "workers not ready, control plane not initialized",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{},
machineDeployments: []clusterv1.MachineDeployment{},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
Reason: anywherev1.ControlPlaneInitializationInProgressReason,
Message: controlPlaneInitalizationInProgressReason,
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.ControlPlaneNotInitializedReason,
Severity: clusterv1.ConditionSeverityInfo,
},
},
{
name: "workers not ready, outdated information, one group",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Generation = 1
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.ObservedGeneration = 0
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.OutdatedInformationReason,
Severity: clusterv1.ConditionSeverityInfo,
},
},
{
name: "workers not ready, outdated information, two groups",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
{
Count: ptr.Int(1),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Generation = 1
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.ObservedGeneration = 0
}),
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-1"
md.ObjectMeta.Generation = 1
md.Status.ObservedGeneration = 1
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.OutdatedInformationReason,
Severity: clusterv1.ConditionSeverityInfo,
},
},
{
			name: "workers not ready, nodes not up to date",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
{
Count: ptr.Int(2),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 1
md.Status.ReadyReplicas = 1
md.Status.UpdatedReplicas = 1
}),
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-1"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 2
md.Status.ReadyReplicas = 2
md.Status.UpdatedReplicas = 1
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.RollingUpgradeInProgress,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Worker nodes not up-to-date yet",
},
},
{
name: "workers not ready, scaling up",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
{
Count: ptr.Int(2),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 0
md.Status.ReadyReplicas = 0
md.Status.UpdatedReplicas = 0
}),
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-1"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 2
md.Status.ReadyReplicas = 2
md.Status.UpdatedReplicas = 2
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.ScalingUpReason,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Scaling up worker nodes",
},
},
{
name: "workers not ready, scaling down",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(2),
},
{
Count: ptr.Int(1),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 2
md.Status.ReadyReplicas = 2
md.Status.UpdatedReplicas = 2
}),
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-1"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 2
md.Status.ReadyReplicas = 2
md.Status.UpdatedReplicas = 2
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.ScalingDownReason,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Scaling down worker nodes",
},
},
{
name: "workers not ready, nodes not ready yet",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
{
Count: ptr.Int(2),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.ReadyReplicas = 1
md.Status.Replicas = 1
md.Status.UpdatedReplicas = 1
}),
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-1"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.ReadyReplicas = 0
md.Status.Replicas = 2
md.Status.UpdatedReplicas = 2
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "False",
Reason: anywherev1.NodesNotReadyReason,
Severity: clusterv1.ConditionSeverityInfo,
Message: "Worker nodes not ready yet",
},
},
{
name: "workers ready",
workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
},
{
Count: ptr.Int(2),
},
},
machineDeployments: []clusterv1.MachineDeployment{
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-0"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 1
md.Status.ReadyReplicas = 1
md.Status.UpdatedReplicas = 1
}),
*test.MachineDeployment(func(md *clusterv1.MachineDeployment) {
md.ObjectMeta.Name = "md-1"
md.ObjectMeta.Labels = map[string]string{
clusterv1.ClusterNameLabel: clusterName,
}
md.Status.Replicas = 2
md.Status.ReadyReplicas = 2
md.Status.UpdatedReplicas = 2
}),
},
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneInitializedCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.WorkersReadyConditon,
Status: "True",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
cluster.Name = clusterName
cluster.Namespace = constants.EksaSystemNamespace
cluster.Spec.WorkerNodeGroupConfigurations = tt.workerNodeGroupConfigurations
cluster.Status.Conditions = tt.conditions
objs := []runtime.Object{}
var client client.Client
for _, md := range tt.machineDeployments {
objs = append(objs, md.DeepCopy())
}
client = fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
err := clusters.UpdateClusterStatusForWorkers(ctx, client, cluster)
g.Expect(err).To(BeNil())
condition := conditions.Get(cluster, tt.wantCondition.Type)
g.Expect(condition).ToNot(BeNil())
g.Expect(condition.Type).To(Equal(tt.wantCondition.Type))
g.Expect(condition.Severity).To(Equal(tt.wantCondition.Severity))
g.Expect(condition.Status).To(Equal(tt.wantCondition.Status))
g.Expect(condition.Reason).To(Equal(tt.wantCondition.Reason))
g.Expect(condition.Message).To(ContainSubstring(tt.wantCondition.Message))
})
}
}
func TestUpdateClusterStatusForCNI(t *testing.T) {
g := NewWithT(t)
tests := []struct {
name string
spec *cluster.Spec
conditions []anywherev1.Condition
wantCondition *anywherev1.Condition
skipUpgrade *bool
wantErr string
}{
{
name: "control plane is not ready",
skipUpgrade: ptr.Bool(false),
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "False",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.DefaultCNIConfiguredCondition,
Reason: anywherev1.ControlPlaneNotReadyReason,
Status: "False",
Severity: clusterv1.ConditionSeverityInfo,
},
},
{
name: "cilium is unmanaged",
skipUpgrade: ptr.Bool(true),
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.DefaultCNIConfiguredCondition,
Reason: anywherev1.SkipUpgradesForDefaultCNIConfiguredReason,
Status: "False",
Severity: clusterv1.ConditionSeverityWarning,
},
},
{
name: "cilium is managed",
skipUpgrade: ptr.Bool(false),
conditions: []anywherev1.Condition{
{
Type: anywherev1.ControlPlaneReadyCondition,
Status: "True",
},
},
wantCondition: &anywherev1.Condition{
Type: anywherev1.DefaultCNIConfiguredCondition,
Status: "True",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = "management-cluster"
s.Cluster.Spec.ClusterNetwork = anywherev1.ClusterNetwork{
CNIConfig: &anywherev1.CNIConfig{
Cilium: &anywherev1.CiliumConfig{
SkipUpgrade: tt.skipUpgrade,
},
},
}
s.Cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
s.Cluster.Status.Conditions = tt.conditions
})
clusters.UpdateClusterStatusForCNI(ctx, spec.Cluster)
if tt.wantCondition != nil {
condition := conditions.Get(spec.Cluster, tt.wantCondition.Type)
g.Expect(condition).ToNot(BeNil())
g.Expect(condition.Type).To(Equal(tt.wantCondition.Type))
g.Expect(condition.Severity).To(Equal(tt.wantCondition.Severity))
g.Expect(condition.Status).To(Equal(tt.wantCondition.Status))
g.Expect(condition.Reason).To(Equal(tt.wantCondition.Reason))
g.Expect(condition.Message).To(ContainSubstring(tt.wantCondition.Message))
}
})
}
}
| 781 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"fmt"
"github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
)
// CleanupStatusAfterValidate removes errors from the cluster status. Intended to be used as a reconciler phase
// after all validation phases have been executed.
func CleanupStatusAfterValidate(_ context.Context, _ logr.Logger, spec *cluster.Spec) (controller.Result, error) {
spec.Cluster.Status.FailureMessage = nil
return controller.Result{}, nil
}
// ClusterValidator runs cluster level validations.
type ClusterValidator struct {
client client.Client
}
// NewClusterValidator returns a validator that will run cluster level validations.
func NewClusterValidator(client client.Client) *ClusterValidator {
return &ClusterValidator{
client: client,
}
}
// ValidateManagementClusterName checks if the management cluster specified in the workload cluster spec is valid.
func (v *ClusterValidator) ValidateManagementClusterName(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) error {
mgmtCluster := &anywherev1.Cluster{}
mgmtClusterKey := client.ObjectKey{
Namespace: cluster.Namespace,
Name: cluster.Spec.ManagementCluster.Name,
}
if err := v.client.Get(ctx, mgmtClusterKey, mgmtCluster); err != nil {
if apierrors.IsNotFound(err) {
err := fmt.Errorf("unable to retrieve management cluster %v: %v", cluster.Spec.ManagementCluster.Name, err)
log.Error(err, "Invalid cluster configuration")
return err
}
}
if mgmtCluster.IsManaged() {
err := fmt.Errorf("%s is not a valid management cluster", mgmtCluster.Name)
log.Error(err, "Invalid cluster configuration")
return err
}
return nil
}
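// Editorial example (not from the original source): validating a workload cluster's
// management cluster reference before reconciling it. The surrounding reconciler
// variables are illustrative assumptions.
//
//	validator := clusters.NewClusterValidator(mgr.GetClient())
//	if err := validator.ValidateManagementClusterName(ctx, log, workloadCluster); err != nil {
//		workloadCluster.Status.FailureMessage = ptr.String(err.Error())
//		return err
//	}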
| 57 |
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"errors"
"testing"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestCleanupStatusAfterValidate(t *testing.T) {
g := NewWithT(t)
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Status.FailureMessage = ptr.String("invalid cluster")
})
g.Expect(
clusters.CleanupStatusAfterValidate(context.Background(), test.NewNullLogger(), spec),
).To(Equal(controller.Result{}))
g.Expect(spec.Cluster.Status.FailureMessage).To(BeNil())
}
func TestValidateManagementClusterNameSuccess(t *testing.T) {
tt := newClusterValidatorTest(t)
objs := []runtime.Object{tt.cluster, tt.managementCluster}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
validator := clusters.NewClusterValidator(cl)
tt.Expect(validator.ValidateManagementClusterName(context.Background(), tt.logger, tt.cluster)).To(BeNil())
}
func TestValidateManagementClusterNameMissing(t *testing.T) {
tt := newClusterValidatorTest(t)
tt.cluster.Spec.ManagementCluster.Name = "missing"
objs := []runtime.Object{tt.cluster, tt.managementCluster}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
validator := clusters.NewClusterValidator(cl)
tt.Expect(validator.ValidateManagementClusterName(context.Background(), tt.logger, tt.cluster)).
To(MatchError(errors.New("unable to retrieve management cluster missing: clusters.anywhere.eks.amazonaws.com \"missing\" not found")))
}
func TestValidateManagementClusterNameInvalid(t *testing.T) {
tt := newClusterValidatorTest(t)
tt.managementCluster.SetManagedBy("differentCluster")
objs := []runtime.Object{tt.cluster, tt.managementCluster}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
validator := clusters.NewClusterValidator(cl)
tt.Expect(validator.ValidateManagementClusterName(context.Background(), tt.logger, tt.cluster)).
To(MatchError(errors.New("my-management-cluster is not a valid management cluster")))
}
type clusterValidatorTest struct {
*WithT
logger logr.Logger
cluster *anywherev1.Cluster
managementCluster *anywherev1.Cluster
}
func newClusterValidatorTest(t *testing.T) *clusterValidatorTest {
logger := test.NewNullLogger()
managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
Namespace: "my-namespace",
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "my-namespace",
},
}
cluster.SetManagedBy("my-management-cluster")
return &clusterValidatorTest{
WithT: NewWithT(t),
logger: logger,
cluster: cluster,
managementCluster: managementCluster,
}
}
| 101 |
eks-anywhere | aws | Go | package clusters
import (
"context"
"reflect"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/collection"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/serverside"
)
// Workers represents the CAPI spec for an eks-a cluster's workers.
type Workers struct {
Groups []WorkerGroup
// Other includes any other provider-specific objects that need to be reconciled
// as part of the worker groups.
Other []client.Object
}
// objects returns a list of API objects for a collection of worker groups.
func (w *Workers) objects() []client.Object {
objs := make([]client.Object, 0, len(w.Groups)*3+len(w.Other))
for _, g := range w.Groups {
objs = append(objs, g.objects()...)
}
objs = append(objs, w.Other...)
return objs
}
// WorkerGroup represents the CAPI spec for an eks-a worker group.
type WorkerGroup struct {
KubeadmConfigTemplate *kubeadmv1.KubeadmConfigTemplate
MachineDeployment *clusterv1.MachineDeployment
ProviderMachineTemplate client.Object
}
func (g *WorkerGroup) objects() []client.Object {
objs := []client.Object{g.KubeadmConfigTemplate, g.MachineDeployment}
if !reflect.ValueOf(g.ProviderMachineTemplate).IsNil() {
objs = append(objs, g.ProviderMachineTemplate)
}
return objs
}
// ToWorkers converts the generic clusterapi Workers definition to the concrete one defined
// here. It's just a helper for callers generating a workers spec using the clusterapi package.
func ToWorkers[M clusterapi.Object[M]](capiWorkers *clusterapi.Workers[M]) *Workers {
w := &Workers{
Groups: make([]WorkerGroup, 0, len(capiWorkers.Groups)),
}
for _, g := range capiWorkers.Groups {
w.Groups = append(w.Groups, WorkerGroup{
MachineDeployment: g.MachineDeployment,
KubeadmConfigTemplate: g.KubeadmConfigTemplate,
ProviderMachineTemplate: g.ProviderMachineTemplate,
})
}
return w
}
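// Editorial example (not from the original source): a provider reconciler that builds its
// workers with the generic clusterapi helpers can convert them before reconciling.
// buildProviderWorkers is an illustrative helper, not a real API.
//
//	capiWorkers, err := buildProviderWorkers(ctx, spec)
//	if err != nil {
//		return controller.Result{}, err
//	}
//	w := ToWorkers(capiWorkers)
//	return ReconcileWorkersForEKSA(ctx, log, c, spec.Cluster, w)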
// ReconcileWorkersForEKSA orchestrates the worker node reconciliation logic for a particular EKS-A cluster.
// It takes care of applying all desired objects in the Workers spec and deleting the
// old MachineDeployments that are not in it.
func ReconcileWorkersForEKSA(ctx context.Context, log logr.Logger, c client.Client, cluster *anywherev1.Cluster, w *Workers) (controller.Result, error) {
capiCluster, err := controller.GetCAPICluster(ctx, c, cluster)
if err != nil {
return controller.Result{}, errors.Wrap(err, "reconciling workers for EKS-A cluster")
}
if capiCluster == nil {
// cluster doesn't exist, this might be transient, requeuing
log.Info("CAPI cluster doesn't exist yet, this might be transient if the CP have just been created, requeueing")
return controller.ResultWithRequeue(5 * time.Second), nil
}
return ReconcileWorkers(ctx, c, capiCluster, w)
}
// ReconcileWorkers orchestrates the worker node reconciliation logic.
// It takes care of applying all desired objects in the Workers spec and deleting the
// old MachineDeployments that are not in it.
func ReconcileWorkers(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, w *Workers) (controller.Result, error) {
if err := serverside.ReconcileObjects(ctx, c, w.objects()); err != nil {
return controller.Result{}, errors.Wrap(err, "applying worker nodes CAPI objects")
}
machineDeployments := &clusterv1.MachineDeploymentList{}
if err := c.List(ctx, machineDeployments,
client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name},
client.InNamespace(cluster.Namespace)); err != nil {
return controller.Result{}, errors.Wrap(err, "listing current machine deployments")
}
desiredMachineDeploymentNames := collection.MapSet(w.Groups, func(g WorkerGroup) string {
return g.MachineDeployment.Name
})
var allErrs []error
for _, m := range machineDeployments.Items {
if !desiredMachineDeploymentNames.Contains(m.Name) {
if err := c.Delete(ctx, &m); err != nil {
allErrs = append(allErrs, err)
}
}
}
if len(allErrs) > 0 {
aggregate := utilerrors.NewAggregate(allErrs)
return controller.Result{}, errors.Wrap(aggregate, "deleting machine deployments during worker node reconciliation")
}
return controller.Result{}, nil
}
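// Worked example (editorial, not from the original source): if the cluster currently owns
// MachineDeployments "md-0", "md-1" and "md-2" (matched by the cluster name label), and
// the desired Workers spec only contains groups for "md-0" and "md-2", the reconcile
// applies the desired objects and then deletes "md-1" because its name is not in
// desiredMachineDeploymentNames.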
| 131 |
eks-anywhere | aws | Go | package clusters_test
import (
"context"
"testing"
"time"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
)
func TestReconcileWorkersSuccess(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
w := workers(ns)
cluster := &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "Cluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: ns,
},
}
existingMachineDeployment1 := machineDeployment("my-cluster-md-1", ns)
existingMachineDeployment2 := machineDeployment("my-cluster-md-2", ns)
existingMachineDeployment3 := machineDeployment("my-other-cluster-md-1", ns)
existingMachineDeployment3.Labels[clusterv1.ClusterNameLabel] = "my-other-cluster"
envtest.CreateObjs(ctx, t, c,
existingMachineDeployment1,
existingMachineDeployment2,
existingMachineDeployment3,
)
g.Expect(clusters.ReconcileWorkers(ctx, c, cluster, w)).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, w.Groups[0].MachineDeployment)
api.ShouldEventuallyExist(ctx, w.Groups[0].KubeadmConfigTemplate)
api.ShouldEventuallyExist(ctx, w.Groups[0].ProviderMachineTemplate)
api.ShouldEventuallyExist(ctx, w.Groups[1].MachineDeployment)
api.ShouldEventuallyExist(ctx, w.Groups[1].KubeadmConfigTemplate)
api.ShouldEventuallyExist(ctx, w.Groups[1].ProviderMachineTemplate)
api.ShouldEventuallyNotExist(ctx, existingMachineDeployment1)
api.ShouldEventuallyNotExist(ctx, existingMachineDeployment2)
api.ShouldEventuallyExist(ctx, existingMachineDeployment3)
}
func TestReconcileWorkersErrorApplyingObjects(t *testing.T) {
g := NewWithT(t)
c := env.Client()
ctx := context.Background()
ns := "fake-ns"
// ns doesn't exist, it will fail
w := workers(ns)
cluster := &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "Cluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: ns,
},
}
g.Expect(clusters.ReconcileWorkers(ctx, c, cluster, w)).Error().To(
MatchError(ContainSubstring("applying worker nodes CAPI objects")),
)
}
func TestToWorkers(t *testing.T) {
g := NewWithT(t)
namespace := constants.EksaSystemNamespace
w := &clusterapi.Workers[*dockerv1.DockerMachineTemplate]{
Groups: []clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{
{
MachineDeployment: machineDeployment("my-cluster-md-0", namespace),
KubeadmConfigTemplate: kubeadmConfigTemplate("my-cluster-md-0-1", namespace),
ProviderMachineTemplate: dockerMachineTemplate("my-cluster-md-0-1", namespace),
},
{
MachineDeployment: machineDeployment("my-cluster-md-3", namespace),
KubeadmConfigTemplate: kubeadmConfigTemplate("my-cluster-md-3-1", namespace),
ProviderMachineTemplate: dockerMachineTemplate("my-cluster-md-3-1", namespace),
},
},
}
want := &clusters.Workers{
Groups: []clusters.WorkerGroup{
{
MachineDeployment: w.Groups[0].MachineDeployment,
KubeadmConfigTemplate: w.Groups[0].KubeadmConfigTemplate,
ProviderMachineTemplate: w.Groups[0].ProviderMachineTemplate,
},
{
MachineDeployment: w.Groups[1].MachineDeployment,
KubeadmConfigTemplate: w.Groups[1].KubeadmConfigTemplate,
ProviderMachineTemplate: w.Groups[1].ProviderMachineTemplate,
},
},
}
g.Expect(clusters.ToWorkers(w)).To(Equal(want))
}
func workers(namespace string) *clusters.Workers {
return &clusters.Workers{
Groups: []clusters.WorkerGroup{
{
MachineDeployment: machineDeployment("my-cluster-md-0", namespace),
KubeadmConfigTemplate: kubeadmConfigTemplate("my-cluster-md-0-1", namespace),
ProviderMachineTemplate: dockerMachineTemplate("my-cluster-md-0-1", namespace),
},
{
MachineDeployment: machineDeployment("my-cluster-md-3", namespace),
KubeadmConfigTemplate: kubeadmConfigTemplate("my-cluster-md-3-1", namespace),
ProviderMachineTemplate: dockerMachineTemplate("my-cluster-md-3-1", namespace),
},
},
}
}
func machineDeployment(name, namespace string) *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "MachineDeployment",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
Labels: map[string]string{
clusterv1.ClusterNameLabel: "my-cluster",
},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: "my-cluster",
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
ClusterName: "my-cluster",
},
},
},
}
}
func kubeadmConfigTemplate(name, namespace string) *bootstrapv1.KubeadmConfigTemplate {
return &bootstrapv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmConfigTemplate",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
}
}
func dockerMachineTemplate(name, namespace string) *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "DockerMachineTemplate",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: name,
},
}
}
func TestReconcileWorkersForEKSAErrorGettingCAPICluster(t *testing.T) {
g := NewWithT(t)
c := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
ctx := context.Background()
ns := "ns"
w := workers(ns)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: ns,
},
}
g.Expect(
clusters.ReconcileWorkersForEKSA(ctx, env.Manager().GetLogger(), c, cluster, w),
).Error().To(MatchError(ContainSubstring("reconciling workers for EKS-A cluster")))
}
func TestReconcileWorkersForEKSANoCAPICluster(t *testing.T) {
g := NewWithT(t)
c := env.Client()
ctx := context.Background()
ns := "ns"
w := workers(ns)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: ns,
},
}
g.Expect(
clusters.ReconcileWorkersForEKSA(ctx, env.Manager().GetLogger(), c, cluster, w),
).To(Equal(controller.Result{Result: &reconcile.Result{RequeueAfter: 5 * time.Second}}))
}
func TestReconcileWorkersForEKSASuccess(t *testing.T) {
g := NewWithT(t)
c := env.Client()
api := envtest.NewAPIExpecter(t, c)
ctx := context.Background()
ns := env.CreateNamespaceForTest(ctx, t)
w := workers(ns)
capiCluster := &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "Cluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: constants.EksaSystemNamespace,
},
}
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: ns,
},
}
envtest.CreateObjs(ctx, t, c,
test.Namespace(constants.EksaSystemNamespace),
capiCluster,
)
g.Expect(
clusters.ReconcileWorkersForEKSA(ctx, env.Manager().GetLogger(), c, cluster, w),
).To(Equal(controller.Result{}))
api.ShouldEventuallyExist(ctx, w.Groups[0].MachineDeployment)
api.ShouldEventuallyExist(ctx, w.Groups[0].KubeadmConfigTemplate)
api.ShouldEventuallyExist(ctx, w.Groups[0].ProviderMachineTemplate)
api.ShouldEventuallyExist(ctx, w.Groups[1].MachineDeployment)
api.ShouldEventuallyExist(ctx, w.Groups[1].KubeadmConfigTemplate)
api.ShouldEventuallyExist(ctx, w.Groups[1].ProviderMachineTemplate)
api.DeleteAndWait(ctx, capiCluster)
}
| 272 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/controller/clusters/ipvalidator.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockIPUniquenessValidator is a mock of IPUniquenessValidator interface.
type MockIPUniquenessValidator struct {
ctrl *gomock.Controller
recorder *MockIPUniquenessValidatorMockRecorder
}
// MockIPUniquenessValidatorMockRecorder is the mock recorder for MockIPUniquenessValidator.
type MockIPUniquenessValidatorMockRecorder struct {
mock *MockIPUniquenessValidator
}
// NewMockIPUniquenessValidator creates a new mock instance.
func NewMockIPUniquenessValidator(ctrl *gomock.Controller) *MockIPUniquenessValidator {
mock := &MockIPUniquenessValidator{ctrl: ctrl}
mock.recorder = &MockIPUniquenessValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockIPUniquenessValidator) EXPECT() *MockIPUniquenessValidatorMockRecorder {
return m.recorder
}
// ValidateControlPlaneIPUniqueness mocks base method.
func (m *MockIPUniquenessValidator) ValidateControlPlaneIPUniqueness(cluster *v1alpha1.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateControlPlaneIPUniqueness", cluster)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateControlPlaneIPUniqueness indicates an expected call of ValidateControlPlaneIPUniqueness.
func (mr *MockIPUniquenessValidatorMockRecorder) ValidateControlPlaneIPUniqueness(cluster interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIPUniqueness", reflect.TypeOf((*MockIPUniquenessValidator)(nil).ValidateControlPlaneIPUniqueness), cluster)
}
| 50 |
eks-anywhere | aws | Go | package handlers
import (
"fmt"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
// CAPIObjectToCluster returns a request handler that enqueues an EKS-A Cluster
// reconcile request for CAPI objects that contain the cluster name and namespace labels.
func CAPIObjectToCluster(log logr.Logger) handler.MapFunc {
return func(o client.Object) []reconcile.Request {
labels := o.GetLabels()
clusterName, ok := labels[clusterapi.EKSAClusterLabelName]
if !ok {
// Object not managed by an eks-a Cluster, don't enqueue
log.V(6).Info("Object not managed by an eks-a Cluster, ignoring", "type", fmt.Sprintf("%T", o), "name", o.GetName())
return nil
}
clusterNamespace := labels[clusterapi.EKSAClusterLabelNamespace]
if clusterNamespace == "" {
log.Info("Object managed by an eks-a Cluster but missing cluster namespace", "type", fmt.Sprintf("%T", o), "name", o.GetName())
return nil
}
log.Info("Enqueuing Cluster request coming from CAPI object", "type", fmt.Sprintf("%T", o), "name", o.GetName(), "cluster", clusterName)
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Namespace: clusterNamespace,
Name: clusterName,
},
}}
}
}
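// Illustrative wiring for CAPIObjectToCluster (a sketch, not part of the original source).
// It assumes a controller-runtime version whose Watches accepts a source.Kind and a
// handler.EnqueueRequestsFromMapFunc matching the MapFunc signature above; the reconciler
// variable is hypothetical:
//
//	ctrl.NewControllerManagedBy(mgr).
//		For(&anywherev1.Cluster{}).
//		Watches(
//			&source.Kind{Type: &clusterv1.Cluster{}},
//			handler.EnqueueRequestsFromMapFunc(handlers.CAPIObjectToCluster(log)),
//		).
//		Complete(reconciler)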
| 42 |
eks-anywhere | aws | Go | package handlers_test
import (
"testing"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/controller/handlers"
)
func TestCAPIObjectToCluster(t *testing.T) {
testCases := []struct {
testName string
obj client.Object
wantRequests []reconcile.Request
}{
{
testName: "no eksa managed",
obj: &clusterv1.Cluster{},
wantRequests: nil,
},
{
testName: " missing namespace",
obj: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
clusterapi.EKSAClusterLabelName: "my-cluster",
},
},
},
wantRequests: nil,
},
{
testName: "managed capi resource",
obj: &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
clusterapi.EKSAClusterLabelName: "my-cluster",
clusterapi.EKSAClusterLabelNamespace: "my-namespace",
},
},
},
wantRequests: []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Name: "my-cluster",
Namespace: "my-namespace",
},
},
},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
handle := handlers.CAPIObjectToCluster(logr.New(logf.NullLogSink{}))
requests := handle(tt.obj)
g.Expect(requests).To(Equal(tt.wantRequests))
})
}
}
| 71 |
eks-anywhere | aws | Go | package handlers
import (
"github.com/go-logr/logr"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
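// ChildObjectToClusters returns a request handler that enqueues a reconcile request for
// every EKS-A Cluster listed in the object's owner references.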
func ChildObjectToClusters(log logr.Logger) handler.MapFunc {
return func(o client.Object) []reconcile.Request {
requests := []reconcile.Request{}
for _, owner := range o.GetOwnerReferences() {
if owner.Kind == anywherev1.ClusterKind {
requests = append(requests, reconcileRequestForOwnerRef(o, owner))
}
}
if len(requests) == 0 {
log.V(6).Info("Object doesn't contain references to a Cluster", "kind", o.GetObjectKind(), "name", o.GetName())
}
return requests
}
}
func reconcileRequestForOwnerRef(o client.Object, owner metav1.OwnerReference) reconcile.Request {
return reconcile.Request{
NamespacedName: types.NamespacedName{
Name: owner.Name,
Namespace: o.GetNamespace(),
},
}
}
| 39 |
eks-anywhere | aws | Go | package handlers_test
import (
"testing"
"github.com/go-logr/logr"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/controller/handlers"
)
func TestChildObjectToClusters(t *testing.T) {
testCases := []struct {
testName string
obj client.Object
wantRequests []reconcile.Request
}{
{
testName: "two clusters",
obj: &anywherev1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-oidc",
Namespace: "my-namespace",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: "my-cluster",
},
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: anywherev1.ClusterKind,
Name: "my-other-cluster",
},
},
},
},
wantRequests: []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Name: "my-cluster",
Namespace: "my-namespace",
},
},
{
NamespacedName: types.NamespacedName{
Name: "my-other-cluster",
Namespace: "my-namespace",
},
},
},
},
{
testName: "no-clusters",
obj: &anywherev1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-oidc",
Namespace: "my-namespace",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: anywherev1.GroupVersion.String(),
Kind: "OtherObj",
Name: "my-obj",
},
},
},
},
wantRequests: []reconcile.Request{},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
handle := handlers.ChildObjectToClusters(logr.New(logf.NullLogSink{}))
requests := handle(tt.obj)
g.Expect(requests).To(Equal(tt.wantRequests))
})
}
}
| 87 |
eks-anywhere | aws | Go | package serverside
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
type ObjectGenerator func() ([]kubernetes.Object, error)
// ObjectApplier helps reconcile kubernetes objects using server side apply.
type ObjectApplier struct {
client client.Client
}
// NewObjectApplier builds an ObjectApplier.
func NewObjectApplier(client client.Client) *ObjectApplier {
return &ObjectApplier{
client: client,
}
}
// Apply uses server side apply to reconcile kubernetes objects returned by a generator.
// It is useful in reconcilers because it simplifies the reconciliation when generating
// API objects from another package, like a provider. This is mostly a helper that
// combines generating objects with server side apply.
func (a *ObjectApplier) Apply(ctx context.Context, generateObjects ObjectGenerator) (controller.Result, error) {
return controller.Result{}, reconcileKubernetesObjects(ctx, a.client, generateObjects)
}
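// Illustrative usage of Apply (a sketch, not part of the original source); the
// providerObjects helper and clusterSpec variable are hypothetical:
//
//	applier := serverside.NewObjectApplier(client)
//	result, err := applier.Apply(ctx, func() ([]kubernetes.Object, error) {
//		return providerObjects(clusterSpec)
//	})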
func reconcileKubernetesObjects(ctx context.Context, client client.Client, generateObjects ObjectGenerator) error {
objs, err := generateObjects()
if err != nil {
return err
}
if err = ReconcileObjects(ctx, client, clientutil.ObjectsToClientObjects(objs)); err != nil {
return err
}
return nil
}
| 47 |
eks-anywhere | aws | Go | package serverside_test
import (
"context"
"errors"
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/serverside"
)
func TestObjectApplierApplySuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
namespace := env.CreateNamespaceForTest(ctx, t)
generator := generator(namespace)
a := serverside.NewObjectApplier(env.Client())
result, err := a.Apply(ctx, generator)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(controller.Result{}))
}
func TestObjectApplierApplyErrorFromGenerator(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
a := serverside.NewObjectApplier(env.Client())
_, err := a.Apply(ctx, func() ([]kubernetes.Object, error) {
return nil, errors.New("failed generating")
})
g.Expect(err).To(MatchError(ContainSubstring("failed generating")))
}
func TestObjectApplierApplyErrorApplying(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
a := serverside.NewObjectApplier(env.Client())
_, err := a.Apply(ctx, func() ([]kubernetes.Object, error) {
// this is an invalid object
return []kubernetes.Object{&corev1.ConfigMap{}}, nil
})
g.Expect(err).To(MatchError(ContainSubstring("resource name may not be empty")))
}
func generator(namespace string) serverside.ObjectGenerator {
return func() ([]kubernetes.Object, error) {
return []kubernetes.Object{
&corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cm-1",
Namespace: namespace,
},
},
&corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cm-1",
Namespace: namespace,
},
},
}, nil
}
}
| 78 |
eks-anywhere | aws | Go | package serverside_test
import (
"os"
"testing"
"github.com/aws/eks-anywhere/internal/test/envtest"
)
var env *envtest.Environment
func TestMain(m *testing.M) {
os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
| 15 |
eks-anywhere | aws | Go | package serverside
import (
"context"
"github.com/pkg/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
const fieldManager = "eks-a-controller"
func ReconcileYaml(ctx context.Context, c client.Client, yaml []byte) error {
objs, err := clientutil.YamlToClientObjects(yaml)
if err != nil {
return err
}
return ReconcileObjects(ctx, c, objs)
}
func ReconcileObjects(ctx context.Context, c client.Client, objs []client.Object) error {
for _, o := range objs {
if err := ReconcileObject(ctx, c, o); err != nil {
return err
}
}
return nil
}
func ReconcileObject(ctx context.Context, c client.Client, obj client.Object) error {
// Server side apply
err := c.Patch(ctx, obj, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership)
if err != nil {
return errors.Wrapf(err, "failed to reconcile object %s, %s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
}
return nil
}
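// Illustrative call of ReconcileYaml (a sketch, not part of the original source):
// reconciling a multi-document YAML manifest with server side apply; manifestYaml is
// hypothetical. Because ForceOwnership is used, conflicting fields are taken over by
// the "eks-a-controller" field manager:
//
//	if err := serverside.ReconcileYaml(ctx, c, manifestYaml); err != nil {
//		return err
//	}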
| 42 |
eks-anywhere | aws | Go | package serverside_test
import (
"context"
"strings"
"testing"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterapiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/controller/serverside"
)
func TestReconcileYaml(t *testing.T) {
cluster1 := newCluster("cluster-1")
cluster2 := newCluster("cluster-2")
tests := []struct {
name string
initialObjs []*clusterapiv1.Cluster
yaml []byte
expectedObjs []*clusterapiv1.Cluster
}{
{
name: "new object",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: #namespace#
spec:
controlPlaneEndpoint:
host: 1.1.1.1
port: 8080`),
expectedObjs: []*clusterapiv1.Cluster{
updatedCluster(cluster1, func(c capiCluster) {
c.Spec.ControlPlaneEndpoint.Port = 8080
c.Spec.ControlPlaneEndpoint.Host = "1.1.1.1"
}),
},
},
{
name: "existing object",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: #namespace#
spec:
paused: true`),
initialObjs: []*clusterapiv1.Cluster{
cluster1.DeepCopy(),
},
expectedObjs: []*clusterapiv1.Cluster{
updatedCluster(cluster1, func(c capiCluster) { c.Spec.Paused = true }),
},
},
{
name: "new and existing object",
yaml: []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-1
namespace: #namespace#
spec:
paused: true
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster-2
namespace: #namespace#
spec:
paused: true`),
initialObjs: []*clusterapiv1.Cluster{
cluster1.DeepCopy(),
},
expectedObjs: []*clusterapiv1.Cluster{
updatedCluster(cluster1, func(c capiCluster) { c.Spec.Paused = true }),
updatedCluster(cluster2, func(c capiCluster) { c.Spec.Paused = true }),
},
},
}
c := env.Client()
reader := env.APIReader()
ctx := context.Background()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
ns := env.CreateNamespaceForTest(ctx, t)
for _, o := range tt.initialObjs {
o.SetNamespace(ns)
if err := c.Create(ctx, o); err != nil {
t.Fatal(err)
}
}
tt.yaml = []byte(strings.ReplaceAll(string(tt.yaml), "#namespace#", ns))
g.Expect(serverside.ReconcileYaml(ctx, c, tt.yaml)).To(Succeed(), "Failed to reconcile with ReconcileYaml()")
for _, o := range tt.expectedObjs {
key := client.ObjectKey{
Namespace: ns,
Name: o.GetName(),
}
cluster := &clusterapiv1.Cluster{}
g.Expect(reader.Get(ctx, key, cluster)).To(Succeed(), "Failed getting obj from cluster")
g.Expect(
equality.Semantic.DeepDerivative(o.Spec, cluster.Spec),
).To(BeTrue(), "Object spec in cluster is not equal to expected object spec:\n Actual:\n%#v\n Expected:\n%#v", cluster.Spec, o.Spec)
}
})
}
}
type capiCluster = *clusterapiv1.Cluster
func newCluster(name string, changes ...func(capiCluster)) *clusterapiv1.Cluster {
c := &clusterapiv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: clusterapiv1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
for _, change := range changes {
change(c)
}
return c
}
func updatedCluster(cluster *clusterapiv1.Cluster, f func(*clusterapiv1.Cluster)) *clusterapiv1.Cluster {
copy := cluster.DeepCopy()
f(copy)
return copy
}
| 150 |
eks-anywhere | aws | Go | package crypto
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net"
"time"
)
type certificategenerator struct{}
type CertificateGenerator interface {
GenerateIamAuthSelfSignCertKeyPair() ([]byte, []byte, error)
}
func NewCertificateGenerator() CertificateGenerator {
return &certificategenerator{}
}
func (cg *certificategenerator) GenerateIamAuthSelfSignCertKeyPair() ([]byte, []byte, error) {
bitSize := 2048
privateKey, err := cg.generatePrivateKey(bitSize)
if err != nil || privateKey == nil {
return nil, nil, fmt.Errorf("failed to generate private key for self sign cert: %v", err)
}
notBefore, notAfter := cg.getCertLifeTime()
serialNumber, err := cg.generateCertSerialNumber()
if err != nil {
return nil, nil, fmt.Errorf("failed to generate serial number for self sign cert: %v", err)
}
template := cg.generateAwsIamAuthCertTemplate(serialNumber, notBefore, notAfter)
certBytes, err := cg.generateSelfSignCertificate(template, privateKey)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate certificate for self sign cert: %v", err)
}
keyBytes := cg.encodePrivateKey(privateKey)
return cg.encodeToPEM(certBytes, "CERTIFICATE"), cg.encodeToPEM(keyBytes, "RSA PRIVATE KEY"), nil
}
func (cg *certificategenerator) generatePrivateKey(bitSize int) (*rsa.PrivateKey, error) {
// Private Key generation
privateKey, err := rsa.GenerateKey(rand.Reader, bitSize)
if err != nil {
return nil, err
}
// Validate Private Key
err = privateKey.Validate()
if err != nil {
return nil, err
}
return privateKey, nil
}
func (cg *certificategenerator) getCertLifeTime() (time.Time, time.Time) {
// lifetime of the CA certificate
lifeTime := time.Hour * 24 * 365 * 100
lifeTimeStart := time.Now()
lifeTimeEnd := lifeTimeStart.Add(lifeTime)
return lifeTimeStart, lifeTimeEnd
}
func (cg *certificategenerator) generateCertSerialNumber() (*big.Int, error) {
// choose a random 128 bit serial number
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}
return serialNumber, nil
}
func (cg *certificategenerator) generateAwsIamAuthCertTemplate(serialNumber *big.Int, notBefore, notAfter time.Time) x509.Certificate {
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: "aws-iam-authenticator",
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
IsCA: true,
IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
DNSNames: []string{"localhost"},
}
return template
}
func (cg *certificategenerator) generateSelfSignCertificate(template x509.Certificate, privateKey *rsa.PrivateKey) ([]byte, error) {
certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
if err != nil {
return nil, err
}
return certBytes, nil
}
func (cg *certificategenerator) encodePrivateKey(privateKey *rsa.PrivateKey) []byte {
// ASN.1 DER format
return x509.MarshalPKCS1PrivateKey(privateKey)
}
func (cg *certificategenerator) encodeToPEM(bytes []byte, blockType string) []byte {
block := pem.Block{
Type: blockType,
Headers: nil,
Bytes: bytes,
}
return pem.EncodeToMemory(&block)
}
| 124 |
eks-anywhere | aws | Go | package crypto_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/crypto"
)
func TestGenerateIamAuthSelfSignCertKeyPairSuccess(t *testing.T) {
certGen := crypto.NewCertificateGenerator()
_, _, err := certGen.GenerateIamAuthSelfSignCertKeyPair()
if err != nil {
t.Fatalf("certificategenerator.GenerateIamAuthSelfSignCertKeyPair()\n error = %v\n wantErr = nil", err)
}
}
| 16 |
eks-anywhere | aws | Go | package crypto
import (
"bytes"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"golang.org/x/crypto/ssh"
"github.com/aws/eks-anywhere/pkg/filewriter"
)
// SshKeySize is the key size used when calling NewSshKeyPair().
const SshKeySize = 4096
// NewSshKeyPair creates an RSA key pair and writes each part to privateOut and publicOut. The output
// written to privateOut and publicOut is formatted as ssh-keygen would format keys.
// The private key part is PEM encoded with the key data formatted in PKCS1, ASN.1 DER as typically done by
// the ssh-keygen GNU tool. The public key is formatted as an SSH Authorized Key suitable for storing on servers.
func NewSshKeyPair(privateOut, publicOut io.Writer) error {
private, err := rsa.GenerateKey(cryptorand.Reader, SshKeySize)
if err != nil {
return fmt.Errorf("generate key: %v", err)
}
privateEncoded := pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Headers: nil,
Bytes: x509.MarshalPKCS1PrivateKey(private),
})
public, err := ssh.NewPublicKey(&private.PublicKey)
if err != nil {
return err
}
publicEncoded := ssh.MarshalAuthorizedKey(public)
if _, err := privateOut.Write(privateEncoded); err != nil {
return err
}
if _, err := publicOut.Write(publicEncoded); err != nil {
return err
}
return nil
}
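// Illustrative usage of NewSshKeyPair (a sketch, not part of the original source):
//
//	var priv, pub bytes.Buffer
//	if err := crypto.NewSshKeyPair(&priv, &pub); err != nil {
//		return err
//	}
//	// priv now holds a PEM encoded PKCS1 private key and pub an authorized_keys entry.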
// NewSshKeyPairUsingFileWriter provides a mechanism for generating SSH key pairs and writing them to the writer's
// directory context. It exists to create compatibility with filewriter.FileWriter and complement older code.
// The string returned is a path to the private key written to disk using writer.
// The bytes returned are the public key formatted as specified in NewSshKeyPair().
func NewSshKeyPairUsingFileWriter(writer filewriter.FileWriter, privateKeyFilename, publicKeyFilename string) (string, []byte, error) {
var private, public bytes.Buffer
if err := NewSshKeyPair(&private, &public); err != nil {
return "", nil, fmt.Errorf("generating key pair: %v", err)
}
privateKeyPath, err := writer.Write(privateKeyFilename, private.Bytes(), filewriter.PersistentFile, filewriter.Permission0600)
if err != nil {
return "", nil, fmt.Errorf("writing private key: %v", err)
}
if _, err := writer.Write(publicKeyFilename, public.Bytes(), filewriter.PersistentFile, filewriter.Permission0600); err != nil {
return "", nil, fmt.Errorf("writing public key: %v", err)
}
return privateKeyPath, public.Bytes(), nil
}
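// Illustrative usage of NewSshKeyPairUsingFileWriter (a sketch, not part of the original
// source); the writer and file names are hypothetical:
//
//	keyPath, pubKey, err := crypto.NewSshKeyPairUsingFileWriter(writer, "id_rsa", "id_rsa.pub")
//	if err != nil {
//		return err
//	}
//	// keyPath points at the private key on disk; pubKey holds the authorized_keys entry.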
| 75 |
eks-anywhere | aws | Go | package crypto_test
import (
"bytes"
"crypto/x509"
"encoding/pem"
"testing"
"github.com/onsi/gomega"
"golang.org/x/crypto/ssh"
"github.com/aws/eks-anywhere/pkg/crypto"
)
func TestNewSshKeyPair(t *testing.T) {
g := gomega.NewWithT(t)
var priv, pub bytes.Buffer
err := crypto.NewSshKeyPair(&priv, &pub)
g.Expect(err).ToNot(gomega.HaveOccurred())
block, _ := pem.Decode(priv.Bytes())
privKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
g.Expect(err).ToNot(gomega.HaveOccurred())
pubKey, _, _, _, err := ssh.ParseAuthorizedKey(pub.Bytes())
g.Expect(err).ToNot(gomega.HaveOccurred())
g.Expect(&privKey.PublicKey).To(gomega.BeEquivalentTo(pubKey))
}
| 33 |
eks-anywhere | aws | Go | package crypto
import (
"strings"
)
// This is what we currently support as the default. In the future,
// we can make this customizable and return a wider range of
// supported names.
func secureCipherSuiteNames() []string {
return []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}
}
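// SecureCipherSuitesString returns the names of the supported secure cipher suites as a
// comma-separated string.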
func SecureCipherSuitesString() string {
return strings.Join(secureCipherSuiteNames(), ",")
}
| 17 |
eks-anywhere | aws | Go | package crypto_test
import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/eks-anywhere/pkg/crypto"
)
var validCipherSuitesString = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
func TestSecureCipherSuiteNames(t *testing.T) {
string := crypto.SecureCipherSuitesString()
if !reflect.DeepEqual(string, validCipherSuitesString) {
assert.Equal(t, validCipherSuitesString, string, "cipher suites don't match")
}
}
| 20 |
eks-anywhere | aws | Go | package crypto
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"net"
)
type DefaultTlsValidator struct{}
type TlsValidator interface {
ValidateCert(host, port, caCertContent string) error
IsSignedByUnknownAuthority(host, port string) (bool, error)
}
func NewTlsValidator() TlsValidator {
return &DefaultTlsValidator{}
}
// IsSignedByUnknownAuthority determines if the certificate presented by the host is signed by an unknown authority.
func (tv *DefaultTlsValidator) IsSignedByUnknownAuthority(host, port string) (bool, error) {
conf := &tls.Config{
InsecureSkipVerify: false,
}
_, err := tls.Dial("tcp", net.JoinHostPort(host, port), conf)
if err != nil {
if _, ok := err.(*tls.CertificateVerificationError); ok {
return true, nil
}
return false, err
}
return false, nil
}
// ValidateCert parses the cert, ensures that the cert format is valid and verifies that the cert is valid for the host.
func (tv *DefaultTlsValidator) ValidateCert(host, port, caCertContent string) error {
// Validates that the cert format is valid
block, _ := pem.Decode([]byte(caCertContent))
if block == nil {
return fmt.Errorf("failed to parse certificate PEM")
}
providedCert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("failed to parse certificate: %v", err)
}
roots := x509.NewCertPool()
roots.AddCert(providedCert)
conf := &tls.Config{
InsecureSkipVerify: false,
RootCAs: roots,
}
// Verifies that the cert is valid by making a connection to the endpoint
endpoint := net.JoinHostPort(host, port)
conn, err := tls.Dial("tcp", endpoint, conf)
if err != nil {
return fmt.Errorf("verifying tls connection to host with custom CA: %v", err)
}
if err = conn.Close(); err != nil {
return fmt.Errorf("closing tls connection to %v: %v", endpoint, err)
}
return nil
}
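// Illustrative usage of the TlsValidator (a sketch, not part of the original source);
// the host, port and caCertPEM values are hypothetical:
//
//	tv := crypto.NewTlsValidator()
//	selfSigned, err := tv.IsSignedByUnknownAuthority("registry.internal", "443")
//	if err != nil {
//		return err
//	}
//	if selfSigned {
//		// The endpoint is not signed by a known authority, so verify it against the provided CA.
//		return tv.ValidateCert("registry.internal", "443", caCertPEM)
//	}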
| 67 |
eks-anywhere | aws | Go | package crypto_test
import (
"crypto/tls"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/aws/eks-anywhere/pkg/crypto"
)
const (
endpoint = "127.0.0.1"
invalidEndpoint = "invalid-endpoint.local"
/*
This certificate was generated using the following commands and is valid only for `127.0.0.1`
openssl genrsa -out ca.key 2048
openssl req -new -x509 -sha256 -days 3650 -key ca.key -out ca.crt
openssl req -newkey rsa:2048 -nodes -keyout server.key -out server.csr
openssl x509 -req -extfile <(printf "subjectAltName=IP:127.0.0.1") -sha256 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
*/
caCert = `
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUWn739ioGaXBxeHg8FNAHHfag37IwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA4MTgyMTM0MTRaFw0zMjA4
MTUyMTM0MTRaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDhDQ1KESLi3DtHTQllnLZ7wasKgcz6bDF5QmI6hQL2
2CRLF1GWw8xg3qTTDPy0FwEYq+8dAdRqE6Lft/ZzpNtXyFa8iPZdH5egNqxS2rrd
xXKicu5ce4MDj/hmpDsfEKJKKOVl8u0vUUccmcsGaS6bqVrXJvenNbeYOXOKjuIG
Z8jDRx906G//uMsUn+ISfB91aFyHRvYfmRp1aQY1i5qxr0oCMUiG6VOBY9mvYZB+
CQbJVv0Tldmtpx0phGRZycIvAGHkxMvylyepZG3NaiYABJnV5ZtpXEmcHJnXrkeU
seLa1HQt9uyO9phw7jJl6uhmXmNIjSI7E2PacnknyDpnAgMBAAGjUzBRMB0GA1Ud
DgQWBBRcM9kTVIvbh3LriH71BhVQwU5EZDAfBgNVHSMEGDAWgBRcM9kTVIvbh3Lr
iH71BhVQwU5EZDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBa
pRUG9O9oPd2VorOEOiQa/kUHXT8i3zyljOv9TUGaWB7KvyJPy8V0UMCXQdLfuNp9
Jj8nhQpVN0LpDxq0jMkhWC21RdcCQ8uBpIZb87qvUNxBcHRnWHvVFFrb1l6d2TzT
EAdbMOOj8rWHhuq0rGZkhYz7hUUK873YZP9FMuhppiGcapDmfUJpR4956AYtkv8f
rvMLWhytaYxZJQrN2r8uNsklhQytJc9ZjfgGOmHkSvxUPkG6e4bts2leFVBK/g8m
NlyAQFLn7C06paTuNQkjtXypFT1ndHy4+hYewW+Yz9KvpmdmIZ4UqjEspX8vA3Lr
JvkUkvQfzDkQWnyL7D6D
-----END CERTIFICATE-----
`
serverKey = `
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOAsPAhhGBid9s
fabdjKPkqt8gl9Kp52CXTkgVGR1t0FQy1oYo5vTAOb2gliUTIs3uoxr3T7xf3O6v
Sxu/TjUV+/6G1NNjid0Nohzt37riauwPOIUNQ8eZ8DThhddS23kopQg3iGSO7GVI
jLhD9KqAA8BYa6+AFodnJfS4xC5MUFTSYQYWReI5UPPvMAwb/Qi2MDh42I3Y5m1Y
Nq+07+uCrqt1gFdR6sapB86LTF0/KRO5BBA8LKU1LKDnibS9ydBCThD9rJyYrXTp
IIa8gHZvDpgidHe3Eq3IdKeIgrULIgqP5Gt0u4uTage1AnAKzq2C/wwheSZJ+oKX
aiwnQgljAgMBAAECggEAcfsNm2OSI/3IZBpRTJsXwtjXgwz9PYwK4SwqraGrqqpt
K4ONzuuZ1xEbXdI0yRWkorgTn2m6a7+tP8dqnroCnRtDhA4Utyn76CMdpm7203cd
DH7U/rXVpdJFL9IBhJJxwo8ssK0CFiGtGCrjeJXSD+oDbeiGvXO5jtRtRh0lEIsS
oAbRC2ZM41WlCfUIXHvrPmTk0OuLAYO+q0GQnyWtaAbhYyRtk8XuuM+RuD5hOoQu
yRJJK/F0y2BhJ4jCGVUIPnGuepW5er+Wb5nCK+U05EYRvCbZGHo1rxk1yuev65KT
88k+tbUFvrfuERpNdi8GrjVzFu2XCmjpi9kFomtYoQKBgQD5sC/8kwGIYIZXSrxX
P4IKNsWwAlYwdHaE8AD9F6vw1rJ1DATmMp4pKFnJc2NAVCEfBg5LiVWuDGlJ4Ymg
Sa74rkhRBqMK2d9Oma7n+j5ILfJRANCdvRbdD6d83sal4u/RIhQZAx82YBrcASaE
6iv6S0Ya6SbtdF866Jnc7qyrbQKBgQDTN+0yKdWBSYmvMv7NgSYTsKNqO5jYQlwU
688LXn+7bekcq7F9NTBp406/MxPGIPXfbNTS+Ph0FtmspjHw6YjwzPhNBjq1EiKq
QW9npmO5jeAch3FgfZ4R5EV2/wnl9ZmPQ2qVCtEz71nP1DhU2z7HdRaM4P8uW3BF
Isd86wc2DwKBgCJuDh/o8YQpszykPJZXVoosBVSA7fueg51PLwO3WOlL4a3MK3zG
rBKG0uK5e40qTKrnfd8in+LxKS+b3wtwPaVi+uvZW3AqnOVMwdaRJjdzxn8u+pVV
tqpi9zh7y66iPWl8JoNQb+RimjGOIw6e79OCv7cEQW7q5hrMajMR4lN9AoGBAMgU
hVNsf3xOLefRlb8T5P7n55TdSacqDVIgImvxo2vn7Nek6Kfjt63GjjTebI/VbzOr
Q1tqTuihMKe0c0Bz6K26bEeCbCBUQpQnEiIMYxFFjRNZVhQCSrdGFmtnone8lC86
vH7c1VmuFNSjgo0Xdru4dZkUFYZTReGn1XLGrHkPAoGAHpmv09bLs51SpSSo1w2u
az3O3LMNWXsdeDYRu3KYUMSEkZiowiHSdRSp194OBm4NQ+8qBqpb3e07VxMeS8Ue
oWvhek8oBOpk7yCoi4zB/wi0ceKgjq0t2jJ/KMuiCqgm2EkUbT6MVp1lV0uoT13J
VQ9QJpWnSd6LTuoWuzCbY80=
-----END PRIVATE KEY-----
`
serverCert = `
-----BEGIN CERTIFICATE-----
MIIDKzCCAhOgAwIBAgIUQRi1UwgnimZ++kgEcfbQE5GZ8d8wDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA4MTgyMTM1MDdaFw0yMzA4
MTgyMTM1MDdaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDOAsPAhhGBid9sfabdjKPkqt8gl9Kp52CXTkgVGR1t
0FQy1oYo5vTAOb2gliUTIs3uoxr3T7xf3O6vSxu/TjUV+/6G1NNjid0Nohzt37ri
auwPOIUNQ8eZ8DThhddS23kopQg3iGSO7GVIjLhD9KqAA8BYa6+AFodnJfS4xC5M
UFTSYQYWReI5UPPvMAwb/Qi2MDh42I3Y5m1YNq+07+uCrqt1gFdR6sapB86LTF0/
KRO5BBA8LKU1LKDnibS9ydBCThD9rJyYrXTpIIa8gHZvDpgidHe3Eq3IdKeIgrUL
IgqP5Gt0u4uTage1AnAKzq2C/wwheSZJ+oKXaiwnQgljAgMBAAGjEzARMA8GA1Ud
EQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBADiqHRze1eQvXbdItJOLppOl
b2YDpXTeoXtyjPVDii1ut29uGoWzuoHjb8XzY1wbKThPz6Pw3iIL26v8i7y8KLzH
LW64pz8CYxchELCuqv+a1K07an82uPnGynrEXz5rP9yOdN3+g1GDGEVdw0ziBDPc
++pGmKe0Wi6V4FOexNSJHOHkIEnxk6rhYi/450grNIkDki3f4saJcT9mB+nMgGl7
F8Wd/nMAlxKt39q4PTaNz+KohZByCJZ9BRx412B6H1hqtrUXv6sdJJrAE8IrPUmM
obFNEP4CAqPBBGeml9PF+9V9sW1HXHd095LerFJFZ0B6bNnwRLA6E9cSzo5RgIY=
-----END CERTIFICATE-----
`
incorrectCert = `
-----BEGIN CERTIFICATE-----
MIID/jCCAuagAwIBAgIUO/ncrEaWxLUqZ8IioBVCRl1P2R4wDQYJKoZIhvcNAQEL
BQAwgagxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQH
DAdTZWF0dGxlMRwwGgYDVQQKDBNBbWF6b24gV2ViIFNlcnZpY2VzMRUwEwYDVQQL
DAxFS1MgQW55d2hlcmUxGDAWBgNVBAMMD3VuaXQtdGVzdC5sb2NhbDEjMCEGCSqG
SIb3DQEJARYUdGVzdEB1bml0LXRlc3QubG9jYWwwHhcNMjExMTE2MjMwNzUzWhcN
MjIxMTE2MjMwNzUzWjCBqDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0
b24xEDAOBgNVBAcMB1NlYXR0bGUxHDAaBgNVBAoME0FtYXpvbiBXZWIgU2Vydmlj
ZXMxFTATBgNVBAsMDEVLUyBBbnl3aGVyZTEYMBYGA1UEAwwPdW5pdC10ZXN0Lmxv
Y2FsMSMwIQYJKoZIhvcNAQkBFhR0ZXN0QHVuaXQtdGVzdC5sb2NhbDCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBALC+5yZrxn8sy7WilquxsjRjqCzaUoio
i31TlU1lRCI1HhgCjAE5xvMMS1vd1lhXxx7VdOS4b5UO+S+IAOjWWTQDfX7H+hOm
AIAU45ejaUtQDZ7hjdHXyfIOhi5Qb+D4ZLiMAQEe/EHLpB0dxBu+KD0nlBxHKUQY
HT1s41u9J4gOjhB+oVAQZmWvoTt0v5iPrljOfjsHPV4HqDUxPh9ngeL3a7AkMxIG
Nf4nh7hqFKKwGMgifAXG3k4aec/gOIKBEt9Ns43uTn45zKHkL2C4NHTGjFGWlnT8
ixxUW3bXFTI6LjKzllprYimGaMiyMPSOEtXOFV2xHedv39Qaq6yp4/sCAwEAAaMe
MBwwGgYDVR0RBBMwEYIPdW5pdC10ZXN0LmxvY2FsMA0GCSqGSIb3DQEBCwUAA4IB
AQCUHw792stgHCPJ6qYD6ij1ehp4yAdXKdaOykTwOxvPvcjdKxnwBYJ7feU+Wh6B
fauk1tpUp6tEF/FzFXdGoYfMvMJtHeS57dFf/mx9uUgHXnhgIYFDe4bM+4W2LHHC
mbvwOYSLAyPfjhihryCRSmIk2X+JYhTSnTXUwKkacEMn3BiEyTfZG9pzr/aIpsIE
e/rwWa9a4BdrhqTBK6VWtTvNYbRaDVP8cbQPVl0qdIyqbTPI/QrITGchY2Qk/eoS
zwaAnAW1ZiriAbeFx+xOaO1fETVSm+5Poyl97r5Mmu97+3IpoWHFPO2z4Os9vn3q
XsKvL2lz2uQY+ZbrfvrL20p2
-----END CERTIFICATE-----
`
invalidCert = `
-----BEGIN CERTIFICATE-----
invalidCert
-----END CERTIFICATE-----
`
)
func TestIsSignedByUnknownAuthority(t *testing.T) {
certSvr, err := runTestServerWithCert(serverCert, serverKey)
if err != nil {
t.Fatalf("starting test server with certs: %v", err)
}
defer certSvr.Close()
certServerPort := strings.Split(certSvr.URL, ":")[2]
svr, err := runTestServer()
if err != nil {
t.Fatalf("starting test server: %v", err)
}
defer svr.Close()
serverPort := strings.Split(svr.URL, ":")[2]
tests := []struct {
testName string
endpoint string
port string
wantCert bool
wantError bool
}{
{
testName: "valid cert",
endpoint: endpoint,
port: certServerPort,
wantCert: true,
wantError: false,
},
{
testName: "invalid endpoint",
endpoint: invalidEndpoint,
port: serverPort,
wantCert: false,
wantError: true,
},
}
for _, tc := range tests {
t.Run(tc.testName, func(t *testing.T) {
tv := crypto.NewTlsValidator()
hasCert, err := tv.IsSignedByUnknownAuthority(tc.endpoint, tc.port)
if (err != nil) != tc.wantError {
t.Fatalf("IsSignedByUnknownAuthority() error = %v, wantError %v", err, tc.wantError)
}
if hasCert != tc.wantCert {
t.Fatalf("IsSignedByUnknownAuthority() returned %v, want %v", hasCert, tc.wantCert)
}
})
}
}
func TestValidateCert(t *testing.T) {
svr, err := runTestServerWithCert(serverCert, serverKey)
if err != nil {
t.Fatalf("starting test server with certs: %v", err)
}
defer svr.Close()
serverPort := strings.Split(svr.URL, ":")[2]
tests := []struct {
testName string
endpoint string
port string
caCert string
wantError bool
}{
{
testName: "valid cert",
endpoint: endpoint,
port: serverPort,
caCert: caCert,
wantError: false,
},
{
testName: "invalid endpoint",
endpoint: invalidEndpoint,
port: serverPort,
caCert: caCert,
wantError: true,
},
{
testName: "incorrect cert",
endpoint: endpoint,
port: serverPort,
caCert: incorrectCert,
wantError: true,
},
{
testName: "invalid cert format",
endpoint: endpoint,
port: serverPort,
caCert: invalidCert,
wantError: true,
},
}
for _, tc := range tests {
t.Run(tc.testName, func(t *testing.T) {
tv := crypto.NewTlsValidator()
err := tv.ValidateCert(tc.endpoint, tc.port, tc.caCert)
if (err != nil) != tc.wantError {
t.Fatalf("ValidateCert() error = %v, wantError %v", err, tc.wantError)
}
})
}
}
func runTestServerWithCert(serverCert, serverKey string) (*httptest.Server, error) {
mux := http.NewServeMux()
svr := httptest.NewUnstartedServer(mux)
certificate, err := tls.X509KeyPair([]byte(serverCert), []byte(serverKey))
if err != nil {
return nil, fmt.Errorf("creating key pair: %v", err)
}
svr.TLS = &tls.Config{
Certificates: []tls.Certificate{certificate},
}
svr.StartTLS()
return svr, nil
}
func runTestServer() (*httptest.Server, error) {
mux := http.NewServeMux()
svr := httptest.NewServer(mux)
return svr, nil
}
| 253 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/crypto/certificategen.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockCertificateGenerator is a mock of CertificateGenerator interface.
type MockCertificateGenerator struct {
ctrl *gomock.Controller
recorder *MockCertificateGeneratorMockRecorder
}
// MockCertificateGeneratorMockRecorder is the mock recorder for MockCertificateGenerator.
type MockCertificateGeneratorMockRecorder struct {
mock *MockCertificateGenerator
}
// NewMockCertificateGenerator creates a new mock instance.
func NewMockCertificateGenerator(ctrl *gomock.Controller) *MockCertificateGenerator {
mock := &MockCertificateGenerator{ctrl: ctrl}
mock.recorder = &MockCertificateGeneratorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCertificateGenerator) EXPECT() *MockCertificateGeneratorMockRecorder {
return m.recorder
}
// GenerateIamAuthSelfSignCertKeyPair mocks base method.
func (m *MockCertificateGenerator) GenerateIamAuthSelfSignCertKeyPair() ([]byte, []byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GenerateIamAuthSelfSignCertKeyPair")
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].([]byte)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// GenerateIamAuthSelfSignCertKeyPair indicates an expected call of GenerateIamAuthSelfSignCertKeyPair.
func (mr *MockCertificateGeneratorMockRecorder) GenerateIamAuthSelfSignCertKeyPair() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateIamAuthSelfSignCertKeyPair", reflect.TypeOf((*MockCertificateGenerator)(nil).GenerateIamAuthSelfSignCertKeyPair))
}
| 51 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/crypto/validator.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockTlsValidator is a mock of TlsValidator interface.
type MockTlsValidator struct {
ctrl *gomock.Controller
recorder *MockTlsValidatorMockRecorder
}
// MockTlsValidatorMockRecorder is the mock recorder for MockTlsValidator.
type MockTlsValidatorMockRecorder struct {
mock *MockTlsValidator
}
// NewMockTlsValidator creates a new mock instance.
func NewMockTlsValidator(ctrl *gomock.Controller) *MockTlsValidator {
mock := &MockTlsValidator{ctrl: ctrl}
mock.recorder = &MockTlsValidatorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockTlsValidator) EXPECT() *MockTlsValidatorMockRecorder {
return m.recorder
}
// IsSignedByUnknownAuthority mocks base method.
func (m *MockTlsValidator) IsSignedByUnknownAuthority(host, port string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsSignedByUnknownAuthority", host, port)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// IsSignedByUnknownAuthority indicates an expected call of IsSignedByUnknownAuthority.
func (mr *MockTlsValidatorMockRecorder) IsSignedByUnknownAuthority(host, port interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSignedByUnknownAuthority", reflect.TypeOf((*MockTlsValidator)(nil).IsSignedByUnknownAuthority), host, port)
}
// ValidateCert mocks base method.
func (m *MockTlsValidator) ValidateCert(host, port, caCertContent string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateCert", host, port, caCertContent)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateCert indicates an expected call of ValidateCert.
func (mr *MockTlsValidatorMockRecorder) ValidateCert(host, port, caCertContent interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCert", reflect.TypeOf((*MockTlsValidator)(nil).ValidateCert), host, port, caCertContent)
}
| 64 |
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
"encoding/json"
"fmt"
"strings"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/semver"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
ImageRepositoryName = "eks-anywhere-packages-bundles"
)
type Reader interface {
ReadBundlesForVersion(eksaVersion string) (*releasev1.Bundles, error)
}
type BundleRegistry interface {
GetRegistryBaseRef(ctx context.Context) (string, error)
}
type BundleReader struct {
kubeConfig string
clusterName string
kubectl KubectlRunner
bundleManager Manager
registry BundleRegistry
}
func NewBundleReader(kubeConfig string, clusterName string, k KubectlRunner, bm Manager, reg BundleRegistry) *BundleReader {
return &BundleReader{
kubeConfig: kubeConfig,
clusterName: clusterName,
kubectl: k,
bundleManager: bm,
registry: reg,
}
}
func (b *BundleReader) GetLatestBundle(ctx context.Context, kubeVersion string) (*packagesv1.PackageBundle, error) {
if len(kubeVersion) > 0 {
return b.getLatestBundleFromRegistry(ctx, kubeVersion)
}
return b.getActiveBundleFromCluster(ctx)
}
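// Illustrative calls of GetLatestBundle (a sketch, not part of the original source):
// passing a Kubernetes major.minor version resolves the latest bundle from the registry,
// while an empty version falls back to the cluster's active bundle:
//
//	bundle, err := reader.GetLatestBundle(ctx, "1.21") // from the registry
//	bundle, err = reader.GetLatestBundle(ctx, "")      // from the active PackageBundleController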
func (b *BundleReader) getLatestBundleFromRegistry(ctx context.Context, kubeVersion string) (*packagesv1.PackageBundle, error) {
registryBaseRef, err := b.registry.GetRegistryBaseRef(ctx)
if err != nil {
return nil, err
}
kubeSemVer, err := semver.New(kubeVersion + ".0")
if err != nil {
return nil, err
}
return b.bundleManager.LatestBundle(ctx, registryBaseRef, fmt.Sprintf("%d", kubeSemVer.Major), fmt.Sprintf("%d", kubeSemVer.Minor), "")
}
func (b *BundleReader) getActiveBundleFromCluster(ctx context.Context) (*packagesv1.PackageBundle, error) {
	// The active bundle is set on the PackageBundleController
bundleController, err := b.GetActiveController(ctx)
if err != nil {
return nil, err
}
bundle, err := b.getPackageBundle(ctx, bundleController.Spec.ActiveBundle)
if err != nil {
return nil, err
}
return bundle, nil
}
func (b *BundleReader) getPackageBundle(ctx context.Context, bundleName string) (*packagesv1.PackageBundle, error) {
params := []string{"get", "packageBundle", "-o", "json", "--kubeconfig", b.kubeConfig, "--namespace", constants.EksaPackagesName, bundleName}
if bundleName == "" {
return nil, fmt.Errorf("no bundle name specified")
}
stdOut, err := b.kubectl.ExecuteCommand(ctx, params...)
if err != nil {
return nil, err
}
obj := &packagesv1.PackageBundle{}
if err := json.Unmarshal(stdOut.Bytes(), obj); err != nil {
return nil, fmt.Errorf("unmarshaling package bundle: %w", err)
}
return obj, nil
}
func (b *BundleReader) GetActiveController(ctx context.Context) (*packagesv1.PackageBundleController, error) {
params := []string{"get", "packageBundleController", "-o", "json", "--kubeconfig", b.kubeConfig, "--namespace", constants.EksaPackagesName, b.clusterName}
stdOut, err := b.kubectl.ExecuteCommand(ctx, params...)
if err != nil {
return nil, err
}
obj := &packagesv1.PackageBundleController{}
if err := json.Unmarshal(stdOut.Bytes(), obj); err != nil {
return nil, fmt.Errorf("unmarshaling active package bundle controller: %w", err)
}
return obj, nil
}
func (b *BundleReader) UpgradeBundle(ctx context.Context, controller *packagesv1.PackageBundleController, newBundleVersion string) error {
controller.Spec.ActiveBundle = newBundleVersion
controllerYaml, err := yaml.Marshal(controller)
if err != nil {
return err
}
params := []string{"apply", "-f", "-", "--kubeconfig", b.kubeConfig}
stdOut, err := b.kubectl.ExecuteFromYaml(ctx, controllerYaml, params...)
if err != nil {
return err
}
fmt.Print(&stdOut)
return nil
}
func GetPackageBundleRef(vb releasev1.VersionsBundle) (string, error) {
packageController := vb.PackageController
// Use package controller registry to fetch packageBundles.
// Format of controller image is: <uri>/<env_type>/<repository_name>
controllerImage := strings.Split(packageController.Controller.Image(), "/")
major, minor, err := parseKubeVersion(vb.KubeVersion)
if err != nil {
logger.MarkFail("unable to parse kubeversion", "error", err)
return "", fmt.Errorf("unable to parse kubeversion %s %v", vb.KubeVersion, err)
}
latestBundle := fmt.Sprintf("v%s-%s-%s", major, minor, "latest")
	registryBaseRef := fmt.Sprintf("%s/%s/%s:%s", controllerImage[0], controllerImage[1], ImageRepositoryName, latestBundle)
return registryBaseRef, nil
}
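// Worked example (an illustration, not part of the original source): for a package
// controller image of "public.ecr.aws/l0g8r8j6/eks-anywhere-packages:v1" and a
// KubeVersion of "1.21", GetPackageBundleRef returns
// "public.ecr.aws/l0g8r8j6/eks-anywhere-packages-bundles:v1-21-latest".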
| 140 |
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
)
type Manager interface {
LatestBundle(ctx context.Context, baseRef string, kubeMajor string, kubeMinor string, clusterName string) (*packagesv1.PackageBundle, error)
}
| 12 |
eks-anywhere | aws | Go | package curatedpackages_test
import (
"bytes"
"context"
"encoding/json"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
"github.com/aws/eks-anywhere/pkg/version"
)
type bundleTest struct {
*WithT
ctx context.Context
kubeConfig string
kubeVersion string
kubeMajor string
kubeMinor string
cluster string
kubectl *mocks.MockKubectlRunner
bundleManager *mocks.MockManager
Command *curatedpackages.BundleReader
activeBundle string
bundleCtrl *packagesv1.PackageBundleController
packageBundle *packagesv1.PackageBundle
registry *mocks.MockBundleRegistry
cliVersion version.Info
}
func newBundleTest(t *testing.T) *bundleTest {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
bm := mocks.NewMockManager(ctrl)
kubeConfig := "test.kubeconfig"
kubeVersion := "1.21"
kubeMajor := "1"
kubeMinor := "21"
cluster := "billy"
registry := mocks.NewMockBundleRegistry(ctrl)
activeBundle := "v1.21-1000"
cliVersion := version.Info{GitVersion: "v1.0.0"}
bundleCtrl := packagesv1.PackageBundleController{
Spec: packagesv1.PackageBundleControllerSpec{
ActiveBundle: activeBundle,
},
}
packageBundle := packagesv1.PackageBundle{
Spec: packagesv1.PackageBundleSpec{
Packages: []packagesv1.BundlePackage{
{
Name: "harbor",
},
},
},
}
return &bundleTest{
WithT: NewWithT(t),
ctx: context.Background(),
kubeConfig: kubeConfig,
kubeVersion: kubeVersion,
kubeMajor: kubeMajor,
kubeMinor: kubeMinor,
cluster: cluster,
kubectl: k,
bundleManager: bm,
bundleCtrl: &bundleCtrl,
packageBundle: &packageBundle,
activeBundle: activeBundle,
registry: registry,
cliVersion: cliVersion,
}
}
func TestGetLatestBundleFromClusterSucceeds(t *testing.T) {
tt := newBundleTest(t)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, gomock.Any()).Return(convertJsonToBytes(tt.bundleCtrl), nil)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, gomock.Any()).Return(convertJsonToBytes(tt.packageBundle), nil)
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, tt.cluster, tt.kubectl, tt.bundleManager, tt.registry)
result, err := tt.Command.GetLatestBundle(tt.ctx, "")
tt.Expect(err).To(BeNil())
tt.Expect(result.Spec.Packages[0].Name).To(BeEquivalentTo(tt.packageBundle.Spec.Packages[0].Name))
}
func TestGetLatestBundleFromClusterFailsNoBundleName(t *testing.T) {
tt := newBundleTest(t)
noActiveBundle := tt.bundleCtrl
noActiveBundle.Spec.ActiveBundle = ""
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, gomock.Any()).Return(convertJsonToBytes(noActiveBundle), nil)
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, tt.cluster, tt.kubectl, tt.bundleManager, tt.registry)
result, err := tt.Command.GetLatestBundle(tt.ctx, "")
tt.Expect(err).To(MatchError(ContainSubstring("no bundle name specified")))
tt.Expect(result).To(BeNil())
}
func TestGetLatestBundleFromRegistrySucceeds(t *testing.T) {
tt := newBundleTest(t)
baseRef := "test_host/test_env/test_controller"
tt.registry.EXPECT().GetRegistryBaseRef(tt.ctx).Return(baseRef, nil)
tt.bundleManager.EXPECT().LatestBundle(tt.ctx, baseRef, tt.kubeMajor, tt.kubeMinor, "").Return(tt.packageBundle, nil)
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, "", tt.kubectl, tt.bundleManager, tt.registry)
result, err := tt.Command.GetLatestBundle(tt.ctx, tt.kubeVersion)
tt.Expect(err).To(BeNil())
tt.Expect(result.Spec.Packages[0].Name).To(BeEquivalentTo(tt.packageBundle.Spec.Packages[0].Name))
}
func TestLatestBundleFromClusterUnknownBundle(t *testing.T) {
tt := newBundleTest(t)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, gomock.Any()).Return(convertJsonToBytes(tt.bundleCtrl), nil)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, gomock.Any()).Return(bytes.Buffer{}, errors.New("error reading bundle"))
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, tt.cluster, tt.kubectl, tt.bundleManager, tt.registry)
_, err := tt.Command.GetLatestBundle(tt.ctx, "")
tt.Expect(err).To(MatchError(ContainSubstring("error reading bundle")))
}
func TestGetLatestBundleFromRegistryWhenError(t *testing.T) {
tt := newBundleTest(t)
tt.registry.EXPECT().GetRegistryBaseRef(tt.ctx).Return("", errors.New("registry doesn't exist"))
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, "", tt.kubectl, tt.bundleManager, tt.registry)
_, err := tt.Command.GetLatestBundle(tt.ctx, tt.kubeVersion)
tt.Expect(err).To(MatchError(ContainSubstring("registry doesn't exist")))
}
func TestLatestBundleFromClusterUnknownCtrl(t *testing.T) {
tt := newBundleTest(t)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, gomock.Any()).Return(bytes.Buffer{}, errors.New("error fetching controller"))
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, tt.cluster, tt.kubectl, tt.bundleManager, tt.registry)
_, err := tt.Command.GetLatestBundle(tt.ctx, "")
tt.Expect(err).To(MatchError(ContainSubstring("error fetching controller")))
}
func TestUpgradeBundleSucceeds(t *testing.T) {
tt := newBundleTest(t)
params := []string{"apply", "-f", "-", "--kubeconfig", tt.kubeConfig}
newBundle := "new-bundle"
expectedCtrl := packagesv1.PackageBundleController{
Spec: packagesv1.PackageBundleControllerSpec{
ActiveBundle: newBundle,
},
}
ctrl, err := yaml.Marshal(expectedCtrl)
tt.Expect(err).To(BeNil())
tt.kubectl.EXPECT().ExecuteFromYaml(tt.ctx, ctrl, params).Return(bytes.Buffer{}, nil)
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, tt.cluster, tt.kubectl, tt.bundleManager, tt.registry)
err = tt.Command.UpgradeBundle(tt.ctx, tt.bundleCtrl, newBundle)
tt.Expect(err).To(BeNil())
tt.Expect(tt.bundleCtrl.Spec.ActiveBundle).To(Equal(newBundle))
}
func TestUpgradeBundleFails(t *testing.T) {
tt := newBundleTest(t)
params := []string{"apply", "-f", "-", "--kubeconfig", tt.kubeConfig}
newBundle := "new-bundle"
expectedCtrl := packagesv1.PackageBundleController{
Spec: packagesv1.PackageBundleControllerSpec{
ActiveBundle: newBundle,
},
}
ctrl, err := yaml.Marshal(expectedCtrl)
tt.Expect(err).To(BeNil())
tt.kubectl.EXPECT().ExecuteFromYaml(tt.ctx, ctrl, params).Return(bytes.Buffer{}, errors.New("unable to apply yaml"))
tt.Command = curatedpackages.NewBundleReader(tt.kubeConfig, tt.cluster, tt.kubectl, tt.bundleManager, tt.registry)
err = tt.Command.UpgradeBundle(tt.ctx, tt.bundleCtrl, newBundle)
tt.Expect(err).NotTo(BeNil())
}
func convertJsonToBytes(obj interface{}) bytes.Buffer {
b, _ := json.Marshal(obj)
return *bytes.NewBuffer(b)
}
| 185 |
eks-anywhere | aws | Go | package curatedpackages
import (
"fmt"
"strconv"
"strings"
"sigs.k8s.io/yaml"
)
func GenerateAllValidConfigurations(configs map[string]string) (string, error) {
data := map[string]interface{}{}
for key, val := range configs {
if val != "" {
keySegments := strings.Split(key, ".")
parse(data, keySegments, 0, val)
}
}
out, err := yaml.Marshal(data)
if err != nil {
return "", fmt.Errorf("failed to marshal configurations %v", data)
}
return string(out), nil
}
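// Worked example (an illustration, not part of the original source): an input of
//
//	map[string]string{"expose.tls.enabled": "false", "sourceRegistry": "localhost:8080", "title": ""}
//
// produces the following YAML; empty values are dropped and "false" is rendered as a
// YAML boolean because it parses with strconv.ParseBool:
//
//	expose:
//	  tls:
//	    enabled: false
//	sourceRegistry: localhost:8080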
func parse(data map[string]interface{}, keySegments []string, index int, val string) {
if index >= len(keySegments) {
return
}
key := keySegments[index]
inner := map[string]interface{}{}
if _, ok := data[key]; ok {
inner = data[key].(map[string]interface{})
}
parse(inner, keySegments, index+1, val)
if len(inner) == 0 {
if bVal, err := strconv.ParseBool(val); err == nil {
data[key] = bVal
} else {
data[key] = val
}
} else {
data[key] = inner
}
}
func ParseConfigurations(configs []string) (map[string]string, error) {
parsedConfigurations := make(map[string]string)
for _, c := range configs {
keyval := strings.Split(c, "=")
if len(keyval) < 2 {
return nil, fmt.Errorf("please specify %s as key=value", c)
}
key, val := keyval[0], keyval[1]
parsedConfigurations[key] = val
}
return parsedConfigurations, nil
}
| 60 |
eks-anywhere | aws | Go | package curatedpackages_test
import (
"fmt"
"testing"
. "github.com/onsi/gomega"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
)
type configurationTest struct {
*WithT
invalidbp *packagesv1.BundlePackage
configs map[string]string
}
func newConfigurationTest(t *testing.T) *configurationTest {
invalidbp := &packagesv1.BundlePackage{
Source: packagesv1.BundlePackageSource{
Versions: []packagesv1.SourceVersion{},
},
}
config := map[string]string{
"expose.tls.auto.commonName": "localhost",
"expose.tls.enabled": "false",
"sourceRegistry": "localhost:8080",
"title": "",
"subtitle": "",
}
return &configurationTest{
WithT: NewWithT(t),
configs: config,
invalidbp: invalidbp,
}
}
func TestGenerateAllValidConfigurationsSuccess(t *testing.T) {
tt := newConfigurationTest(t)
output, err := curatedpackages.GenerateAllValidConfigurations(tt.configs)
tt.Expect(err).To(BeNil())
expectedOutput := fmt.Sprintf(
"%s:\n %s:\n %s:\n %s: %s\n %s: %s\n%s: %s\n",
"expose", "tls", "auto", "commonName", "localhost", "enabled", "false",
"sourceRegistry", "localhost:8080",
)
tt.Expect(output).To(Equal(expectedOutput))
}
func TestParseConfigurationsSuccess(t *testing.T) {
tt := newConfigurationTest(t)
configs := []string{"registry=localhost:8080"}
parsedConfigs, err := curatedpackages.ParseConfigurations(configs)
tt.Expect(err).To(BeNil())
tt.Expect(len(parsedConfigs)).To(Equal(1))
}
func TestParseConfigurationsFail(t *testing.T) {
tt := newConfigurationTest(t)
configs := []string{"registry"}
parsedConfigs, err := curatedpackages.ParseConfigurations(configs)
tt.Expect(err).NotTo(BeNil())
tt.Expect(len(parsedConfigs)).To(Equal(0))
}
| 75 |
eks-anywhere | aws | Go | package curatedpackages
import (
"fmt"
"io"
"strings"
"text/tabwriter"
)
// cpTabwriter is a modified tabwriter for curated packages CLI duty.
type cpTabwriter struct {
*tabwriter.Writer
}
// newCPTabwriter instantiates a curated packages custom tabwriter.
//
// If customParams is nil, cpTabwriterDefaultParams will be used. The caller
// should call Flush just as they would with an unmodified tabwriter.Writer.
func newCPTabwriter(w io.Writer, customParams *cpTabwriterParams) *cpTabwriter {
if customParams == nil {
customParams = cpTabwriterDefaultParams()
}
tw := tabwriter.NewWriter(w, customParams.minWidth, customParams.tabWidth,
customParams.padding, customParams.padChar, customParams.flags)
return &cpTabwriter{Writer: tw}
}
// writeTable writes a table from a 2-D slice of strings, joining the strings in each row with tabs.
//
// Tab characters and newlines will be added to the end of each row.
func (w *cpTabwriter) writeTable(lines [][]string) error {
var err error
for _, line := range lines {
joined := strings.Join(line, "\t")
// A final "\t" is added, as tabwriter is tab-terminated, not the more
// common tab-separated. See https://pkg.go.dev/text/tabwriter#Writer
// for details. There are cases where one might not want this trailing
// tab, but it hasn't come up yet, and is easily worked around when
// the time comes.
if !strings.HasSuffix(joined, "\t") {
joined += "\t"
}
_, err = fmt.Fprintln(w, joined)
if err != nil {
return err
}
}
return nil
}
// cpTabwriterParams makes it easier to reuse common tabwriter parameters.
//
// See https://pkg.go.dev/text/tabwriter#Writer.Init for details.
type cpTabwriterParams struct {
// minWidth is the minimal cell width including any padding
minWidth int
// tabWidth width of tab characters (equivalent number of spaces)
tabWidth int
// padding added to a cell before computing its width
padding int
// padChar ASCII char used for padding
padChar byte
// flags formatting control
flags uint
}
// cpTabwriterDefaultParams is just a convenience when making tabwriters.
//
// It's implemented as a function to make it harder to override the defaults
// accidentally.
func cpTabwriterDefaultParams() *cpTabwriterParams {
return &cpTabwriterParams{
minWidth: 16, tabWidth: 8, padding: 0, padChar: '\t', flags: 0,
}
}
| 77 |
eks-anywhere | aws | Go | package curatedpackages
import (
"bytes"
"fmt"
"testing"
)
func TestCPTabwriterDefaultParams(t *testing.T) {
buf := &bytes.Buffer{}
w := newCPTabwriter(buf, nil)
baseBuf := &bytes.Buffer{}
baseline := newCPTabwriter(baseBuf, nil)
fmt.Fprint(baseline, "one\ta\t\ntwo\tb\t\nthree\tc\t\n")
baseline.Flush()
err := w.writeTable([][]string{{"one", "a"}, {"two", "b"}, {"three", "c"}})
if err != nil {
t.Fatalf("expected nil, got %v", err)
}
w.Flush()
if baseBuf.String() != buf.String() {
t.Fatalf("expected %q, got %q", baseBuf.String(), buf.String())
}
}
func TestCPTabwriterCustomPadChar(t *testing.T) {
buf := &bytes.Buffer{}
params := cpTabwriterDefaultParams()
params.padChar = '='
w := newCPTabwriter(buf, params)
baseBuf := &bytes.Buffer{}
baseline := newCPTabwriter(baseBuf, params)
fmt.Fprint(baseline, "one\ta\t\ntwo\tb\t\nthree\tc\t\n")
baseline.Flush()
err := w.writeTable([][]string{{"one", "a"}, {"two", "b"}, {"three", "c"}})
if err != nil {
t.Fatalf("expected nil, got %v", err)
}
w.Flush()
if baseBuf.String() != buf.String() {
t.Fatalf("expected %q, got %q", baseBuf.String(), buf.String())
}
}
| 50 |
eks-anywhere | aws | Go | package curatedpackages
import (
"bytes"
"context"
"fmt"
"strings"
"github.com/go-logr/logr"
"oras.land/oras-go/pkg/content"
"oras.land/oras-go/pkg/oras"
"github.com/aws/eks-anywhere-packages/pkg/artifacts"
"github.com/aws/eks-anywhere-packages/pkg/bundle"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/types"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
license = `The Amazon EKS Anywhere Curated Packages are only available to customers with the
Amazon EKS Anywhere Enterprise Subscription`
width = 86
)
var userMsgSeparator = strings.Repeat("-", width)
// CreateBundleManager builds a new bundle Manager.
func CreateBundleManager(log logr.Logger) bundle.RegistryClient {
puller := artifacts.NewRegistryPuller(log)
return bundle.NewRegistryClient(puller)
}
func parseKubeVersion(kubeVersion string) (string, string, error) {
versionSplit := strings.Split(kubeVersion, ".")
if len(versionSplit) != 2 {
return "", "", fmt.Errorf("invalid kubeversion %s", kubeVersion)
}
major, minor := versionSplit[0], versionSplit[1]
return major, minor, nil
}
func GetVersionBundle(reader Reader, eksaVersion string, spec *v1alpha1.Cluster) (*releasev1.VersionsBundle, error) {
b, err := reader.ReadBundlesForVersion(eksaVersion)
if err != nil {
return nil, err
}
versionsBundle, err := cluster.GetVersionsBundle(spec, b)
if err != nil {
return nil, err
}
return versionsBundle, nil
}
func PrintLicense() {
// Currently, use the width of the longest line to repeat the dashes
// Sample Output
//-------------------------------------------------------------------------------------
//The Amazon EKS Anywhere Curated Packages are only available to customers with the
//Amazon EKS Anywhere Enterprise Subscription
//-------------------------------------------------------------------------------------
fmt.Println(userMsgSeparator)
fmt.Println(license)
fmt.Println(userMsgSeparator)
}
// PullLatestBundle pulls the given bundle artifact and returns its contents.
func PullLatestBundle(ctx context.Context, log logr.Logger, artifact string) ([]byte, error) {
puller := artifacts.NewRegistryPuller(log)
data, err := puller.Pull(ctx, artifact, "")
if err != nil {
return nil, fmt.Errorf("unable to pull artifacts %v", err)
}
if len(bytes.TrimSpace(data)) == 0 {
return nil, fmt.Errorf("latest package bundle artifact is empty")
}
return data, nil
}
func PushBundle(ctx context.Context, ref, fileName string, fileContent []byte) error {
registry, err := content.NewRegistry(content.RegistryOptions{Insecure: ctx.Value(types.InsecureRegistry).(bool)})
if err != nil {
return fmt.Errorf("creating registry: %w", err)
}
memoryStore := content.NewMemory()
desc, err := memoryStore.Add("bundle.yaml", "", fileContent)
if err != nil {
return err
}
manifest, manifestDesc, config, configDesc, err := content.GenerateManifestAndConfig(nil, nil, desc)
if err != nil {
return err
}
memoryStore.Set(configDesc, config)
err = memoryStore.StoreManifest(ref, manifestDesc, manifest)
if err != nil {
return err
}
logger.Info(fmt.Sprintf("Pushing %s to %s...", fileName, ref))
desc, err = oras.Copy(ctx, memoryStore, ref, registry, "")
if err != nil {
return err
}
logger.Info(fmt.Sprintf("Pushed to %s with digest %s", ref, desc.Digest))
return nil
}
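// GetRegistry returns the registry (and namespace) portion of an image URI, dropping the trailing repository name.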
func GetRegistry(uri string) string {
lastInd := strings.LastIndex(uri, "/")
if lastInd == -1 {
return uri
}
return uri[:lastInd]
}
| 121 |
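A minimal usage sketch (not part of the repository) showing how GetRegistry trims the repository name from a package controller image URI; the import path is the module's public path and the URIs below are illustrative values only.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

func main() {
	// "public.ecr.aws/l0g8r8j6/eks-anywhere-packages" -> "public.ecr.aws/l0g8r8j6"
	fmt.Println(curatedpackages.GetRegistry("public.ecr.aws/l0g8r8j6/eks-anywhere-packages"))
	// A URI without any "/" is returned unchanged.
	fmt.Println(curatedpackages.GetRegistry("public.ecr.aws"))
}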
eks-anywhere | aws | Go | package curatedpackages_test
import (
_ "embed"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func TestCreateBundleManagerWhenValidKubeVersion(t *testing.T) {
bm := curatedpackages.CreateBundleManager(test.NewNullLogger())
if bm == nil {
t.Errorf("Bundle Manager should be successful when valid kubeversion")
}
}
func TestGetVersionBundleSuccess(t *testing.T) {
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
eksaVersion := "v1.0.0"
kubeVersion := "1.21"
bundles := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
PackageController: releasev1.PackageBundle{
Controller: releasev1.Image{
URI: "test_host/test_env/test_repository:test-version",
},
},
KubeVersion: kubeVersion,
},
},
},
}
clusterSpec := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube121,
},
}
reader.EXPECT().ReadBundlesForVersion(eksaVersion).Return(bundles, nil)
_, err := curatedpackages.GetVersionBundle(reader, eksaVersion, clusterSpec)
if err != nil {
t.Errorf("GetVersionBundle Should Pass When bundle exists")
}
}
func TestGetVersionBundleFail(t *testing.T) {
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
eksaVersion := "v1.0.0"
reader.EXPECT().ReadBundlesForVersion(eksaVersion).Return(nil, errors.New("failed to read bundles"))
clusterSpec := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube121,
},
}
_, err := curatedpackages.GetVersionBundle(reader, eksaVersion, clusterSpec)
if err == nil {
t.Errorf("GetVersionBundle should fail when no bundles exist")
}
}
func TestGetVersionBundleFailsWhenBundleNil(t *testing.T) {
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
eksaVersion := "v1.0.0"
bundles := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
PackageController: releasev1.PackageBundle{
Controller: releasev1.Image{
URI: "test_host/test_env/test_repository:test-version",
},
},
KubeVersion: "1.22",
},
},
},
}
clusterSpec := &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: v1alpha1.Kube121,
},
}
reader.EXPECT().ReadBundlesForVersion(eksaVersion).Return(bundles, nil)
_, err := curatedpackages.GetVersionBundle(reader, eksaVersion, clusterSpec)
if err == nil {
t.Errorf("GetVersionBundle should fail when version bundle for kubeversion doesn't exist")
}
}
func TestGetRegistrySuccess(t *testing.T) {
g := NewWithT(t)
uri := "public.ecr.aws/l0g8r8j6/eks-anywhere-packages"
registry := curatedpackages.GetRegistry(uri)
expected := "public.ecr.aws/l0g8r8j6"
g.Expect(registry).To(Equal(expected))
}
func TestGetRegistryFail(t *testing.T) {
g := NewWithT(t)
uri := "public.ecr.aws"
registry := curatedpackages.GetRegistry(uri)
expected := "public.ecr.aws"
g.Expect(registry).To(Equal(expected))
}
| 125 |
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/executables"
)
type CustomRegistry struct {
*executables.Helm
registry string
}
func NewCustomRegistry(helm *executables.Helm, registry string) *CustomRegistry {
return &CustomRegistry{
Helm: helm,
registry: registry,
}
}
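// GetRegistryBaseRef returns the package bundle registry base ref for the user-supplied registry.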
func (cm *CustomRegistry) GetRegistryBaseRef(ctx context.Context) (string, error) {
return fmt.Sprintf("%s/%s", cm.registry, ImageRepositoryName), nil
}
| 25 |
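A minimal sketch (hypothetical registry value) of how CustomRegistry composes its base ref; the Helm executable is not exercised by GetRegistryBaseRef, so nil is passed purely for illustration.

package main

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

func main() {
	// nil Helm is acceptable here because GetRegistryBaseRef only formats strings.
	cr := curatedpackages.NewCustomRegistry(nil, "1.2.3.4:443/curated-packages")
	ref, err := cr.GetRegistryBaseRef(context.Background())
	if err != nil {
		return
	}
	// Prints "1.2.3.4:443/curated-packages/" followed by ImageRepositoryName.
	fmt.Println(ref)
}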
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/version"
)
type DefaultRegistry struct {
releaseManifestReader Reader
kubeVersion string
cliVersion version.Info
}
func NewDefaultRegistry(rmr Reader, kv string, cv version.Info) *DefaultRegistry {
return &DefaultRegistry{
releaseManifestReader: rmr,
kubeVersion: kv,
cliVersion: cv,
}
}
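// GetRegistryBaseRef derives the package bundle registry base ref from the package controller image in the release bundle that matches the configured Kubernetes version.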
func (dr *DefaultRegistry) GetRegistryBaseRef(ctx context.Context) (string, error) {
release, err := dr.releaseManifestReader.ReadBundlesForVersion(dr.cliVersion.GitVersion)
if err != nil {
return "", fmt.Errorf("unable to parse the release manifest %v", err)
}
versionsBundle := bundles.VersionsBundleForKubernetesVersion(release, dr.kubeVersion)
if versionsBundle == nil {
return "", fmt.Errorf("kubernetes version %s is not supported by bundles manifest %d", dr.kubeVersion, release.Spec.Number)
}
packageController := versionsBundle.PackageController
// Use package controller registry to fetch packageBundles.
// Format of controller image is: <uri>/<env_type>/<repository_name>
registry := GetRegistry(packageController.Controller.Image())
registryBaseRef := fmt.Sprintf("%s/%s", registry, ImageRepositoryName)
return registryBaseRef, nil
}
| 42 |
eks-anywhere | aws | Go | package curatedpackages_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
"github.com/aws/eks-anywhere/pkg/version"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type defaultRegistryTest struct {
*WithT
ctx context.Context
releaseManifest *mocks.MockReader
KubeVersion string
CliVersion version.Info
Command *curatedpackages.DefaultRegistry
bundles *releasev1.Bundles
}
func newDefaultRegistryTest(t *testing.T) *defaultRegistryTest {
ctrl := gomock.NewController(t)
reader := mocks.NewMockReader(ctrl)
kubeVersion := "1.21"
return &defaultRegistryTest{
WithT: NewWithT(t),
ctx: context.Background(),
releaseManifest: reader,
bundles: &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
PackageController: releasev1.PackageBundle{
Controller: releasev1.Image{
URI: "test_host/test_env/test_repository:test-version",
},
},
KubeVersion: kubeVersion,
},
},
},
},
}
}
func TestDefaultRegistrySucceeds(t *testing.T) {
tt := newDefaultRegistryTest(t)
tt.releaseManifest.EXPECT().ReadBundlesForVersion("v1.0.0").Return(tt.bundles, nil)
tt.Command = curatedpackages.NewDefaultRegistry(
tt.releaseManifest,
"1.21",
version.Info{GitVersion: "v1.0.0"},
)
result, err := tt.Command.GetRegistryBaseRef(tt.ctx)
tt.Expect(err).To(BeNil())
tt.Expect(result).To(BeEquivalentTo("test_host/test_env/" + curatedpackages.ImageRepositoryName))
}
func TestDefaultRegistryUnknownKubeVersionFails(t *testing.T) {
tt := newDefaultRegistryTest(t)
tt.releaseManifest.EXPECT().ReadBundlesForVersion("v1.0.0").Return(tt.bundles, nil)
tt.Command = curatedpackages.NewDefaultRegistry(
tt.releaseManifest,
"1.22",
version.Info{GitVersion: "v1.0.0"},
)
_, err := tt.Command.GetRegistryBaseRef(tt.ctx)
tt.Expect(err).To(MatchError(ContainSubstring("is not supported by bundles manifest")))
}
func TestDefaultRegistryUnknownGitVersion(t *testing.T) {
tt := newDefaultRegistryTest(t)
tt.releaseManifest.EXPECT().ReadBundlesForVersion("v1.0.0").Return(nil, errors.New("unknown git version"))
tt.Command = curatedpackages.NewDefaultRegistry(
tt.releaseManifest,
"1.21",
version.Info{GitVersion: "v1.0.0"},
)
_, err := tt.Command.GetRegistryBaseRef(tt.ctx)
tt.Expect(err).To(MatchError(ContainSubstring("unable to parse the release manifest")))
}
| 88 |
eks-anywhere | aws | Go | package curatedpackages
import (
"k8s.io/apimachinery/pkg/version"
)
// Discovery implements ServerVersionInterface to provide the Kubernetes client
// version to be used.
type Discovery struct {
kubeVersion *KubeVersion
}
type KubeVersion struct {
major string
minor string
}
func NewDiscovery(kubeVersion *KubeVersion) *Discovery {
return &Discovery{
kubeVersion: kubeVersion,
}
}
func NewKubeVersion(major string, minor string) *KubeVersion {
return &KubeVersion{
major: major,
minor: minor,
}
}
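// ServerVersion returns the configured Kubernetes version as a version.Info.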
func (d *Discovery) ServerVersion() (*version.Info, error) {
v := &version.Info{
Major: d.kubeVersion.major,
Minor: d.kubeVersion.minor,
}
return v, nil
}
| 40 |
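A small sketch showing how Discovery reports the configured Kubernetes version; the major/minor values are arbitrary examples.

package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

func main() {
	d := curatedpackages.NewDiscovery(curatedpackages.NewKubeVersion("1", "21"))
	info, err := d.ServerVersion()
	if err != nil {
		return
	}
	fmt.Printf("major=%s minor=%s\n", info.Major, info.Minor) // major=1 minor=21
}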
eks-anywhere | aws | Go | package curatedpackages_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
)
func TestServerVersionSucceeds(t *testing.T) {
kubeVersion := curatedpackages.NewKubeVersion("1", "21")
discovery := curatedpackages.NewDiscovery(kubeVersion)
_, err := discovery.ServerVersion()
if err != nil {
t.Errorf("Server Version should succeed when valid kubernetes version is provided")
}
}
| 18 |
eks-anywhere | aws | Go | package curatedpackages
import (
"bytes"
"context"
"k8s.io/apimachinery/pkg/runtime"
)
type KubectlRunner interface {
ExecuteCommand(ctx context.Context, opts ...string) (bytes.Buffer, error)
ExecuteFromYaml(ctx context.Context, yaml []byte, opts ...string) (bytes.Buffer, error)
// GetObject performs a GET call to the kube API server authenticating with a kubeconfig file
// and unmarshals the response into the provided Object.
// If the object is not found, it returns an error implementing apimachinery errors.APIStatus
GetObject(ctx context.Context, resourceType, name, namespace, kubeconfig string, obj runtime.Object) error
// HasResource is true if the resource can be retrieved from the API and has length > 0.
HasResource(ctx context.Context, resourceType string, name string, kubeconfig string, namespace string) (bool, error)
}
| 20 |
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
"errors"
"fmt"
"io"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/templater"
)
const (
CustomName = "generated-"
kind = "Package"
)
type PackageClientOpt func(*PackageClient)
type PackageClient struct {
bundle *packagesv1.PackageBundle
customPackages []string
kubectl KubectlRunner
customConfigs []string
}
func NewPackageClient(kubectl KubectlRunner, options ...PackageClientOpt) *PackageClient {
pc := &PackageClient{
kubectl: kubectl,
}
for _, o := range options {
o(pc)
}
return pc
}
// sourceWithVersions is a wrapper to help get package versions.
//
// This should be pushed upstream to eks-anywhere-packages, then this
// implementation can be removed.
type sourceWithVersions packagesv1.BundlePackageSource
func (s sourceWithVersions) VersionsSlice() []string {
versions := []string{}
for _, ver := range packagesv1.BundlePackageSource(s).Versions {
versions = append(versions, ver.Name)
}
return versions
}
// DisplayPackages pretty-prints a table of available packages.
func (pc *PackageClient) DisplayPackages(w io.Writer) error {
lines := append([][]string{}, packagesHeaderLines...)
for _, pkg := range pc.bundle.Spec.Packages {
versions := sourceWithVersions(pkg.Source).VersionsSlice()
lines = append(lines, []string{pkg.Name, strings.Join(versions, ", ")})
}
tw := newCPTabwriter(w, nil)
defer tw.Flush()
return tw.writeTable(lines)
}
// packagesHeaderLines are the header rows used when printing the table of curated packages.
var packagesHeaderLines = [][]string{
{"Package", "Version(s)"},
{"-------", "----------"},
}
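// GeneratePackages returns Package resources for the requested custom packages, looked up by name in the bundle.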
func (pc *PackageClient) GeneratePackages(clusterName string) ([]packagesv1.Package, error) {
packageMap := pc.packageMap()
var packages []packagesv1.Package
for _, p := range pc.customPackages {
bundlePackage, found := packageMap[strings.ToLower(p)]
if !found {
return nil, fmt.Errorf("unknown package %q", p)
}
name := CustomName + strings.ToLower(bundlePackage.Name)
packages = append(packages, convertBundlePackageToPackage(bundlePackage, name, clusterName, pc.bundle.APIVersion, ""))
}
return packages, nil
}
func (pc *PackageClient) WritePackagesToStdOut(packages []packagesv1.Package) error {
var output [][]byte
for _, p := range packages {
displayPackage := NewDisplayablePackage(&p)
content, err := yaml.Marshal(displayPackage)
if err != nil {
return fmt.Errorf("unable to parse package %s %v", p.Name, err)
}
output = append(output, content)
}
fmt.Println(string(templater.AppendYamlResources(output...)))
return nil
}
func (pc *PackageClient) GetPackageFromBundle(packageName string) (*packagesv1.BundlePackage, error) {
packageMap := pc.packageMap()
p, ok := packageMap[strings.ToLower(packageName)]
if !ok {
return nil, fmt.Errorf("package %s not found", packageName)
}
return &p, nil
}
func (pc *PackageClient) packageMap() map[string]packagesv1.BundlePackage {
pMap := make(map[string]packagesv1.BundlePackage)
for _, p := range pc.bundle.Spec.Packages {
pMap[strings.ToLower(p.Name)] = p
}
return pMap
}
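// InstallPackage creates a Package resource in the cluster for the given bundle package via kubectl.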
func (pc *PackageClient) InstallPackage(ctx context.Context, bp *packagesv1.BundlePackage, customName string, clusterName string, kubeConfig string) error {
configString, err := pc.getInstallConfigurations()
if err != nil {
return err
}
p := convertBundlePackageToPackage(*bp, customName, clusterName, pc.bundle.APIVersion, configString)
displayPackage := NewDisplayablePackage(&p)
params := []string{"create", "-f", "-", "--kubeconfig", kubeConfig}
packageYaml, err := yaml.Marshal(displayPackage)
if err != nil {
return err
}
stdOut, err := pc.kubectl.ExecuteFromYaml(ctx, packageYaml, params...)
if err != nil {
return err
}
fmt.Print(&stdOut)
return nil
}
func (pc *PackageClient) getInstallConfigurations() (string, error) {
installConfigs, err := ParseConfigurations(pc.customConfigs)
if err != nil {
return "", err
}
return GenerateAllValidConfigurations(installConfigs)
}
func (pc *PackageClient) ApplyPackages(ctx context.Context, fileName string, kubeConfig string) error {
params := []string{"apply", "-f", fileName, "--kubeconfig", kubeConfig}
stdOut, err := pc.kubectl.ExecuteCommand(ctx, params...)
if err != nil {
fmt.Print(&stdOut)
return err
}
fmt.Print(&stdOut)
return nil
}
func (pc *PackageClient) CreatePackages(ctx context.Context, fileName string, kubeConfig string) error {
params := []string{"create", "-f", fileName, "--kubeconfig", kubeConfig}
stdOut, err := pc.kubectl.ExecuteCommand(ctx, params...)
if err != nil {
fmt.Print(&stdOut)
return err
}
fmt.Print(&stdOut)
return nil
}
func (pc *PackageClient) DeletePackages(ctx context.Context, packages []string, kubeConfig string, clusterName string) error {
params := []string{"delete", "packages", "--kubeconfig", kubeConfig, "--namespace", constants.EksaPackagesName + "-" + clusterName}
params = append(params, packages...)
stdOut, err := pc.kubectl.ExecuteCommand(ctx, params...)
if err != nil {
fmt.Print(&stdOut)
return err
}
fmt.Print(&stdOut)
return nil
}
func (pc *PackageClient) DescribePackages(ctx context.Context, packages []string, kubeConfig string, clusterName string) error {
params := []string{"describe", "packages", "--kubeconfig", kubeConfig, "--namespace", constants.EksaPackagesName + "-" + clusterName}
params = append(params, packages...)
stdOut, err := pc.kubectl.ExecuteCommand(ctx, params...)
if err != nil {
fmt.Print(&stdOut)
return fmt.Errorf("kubectl execution failure: \n%v", err)
}
if len(stdOut.Bytes()) == 0 {
return errors.New("no resources found")
}
fmt.Print(&stdOut)
return nil
}
func convertBundlePackageToPackage(bp packagesv1.BundlePackage, name string, clusterName string, apiVersion string, config string) packagesv1.Package {
p := packagesv1.Package{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.EksaPackagesName + "-" + clusterName,
},
TypeMeta: metav1.TypeMeta{
Kind: kind,
APIVersion: apiVersion,
},
Spec: packagesv1.PackageSpec{
PackageName: bp.Name,
Config: config,
},
}
return p
}
func WithBundle(bundle *packagesv1.PackageBundle) func(*PackageClient) {
return func(config *PackageClient) {
config.bundle = bundle
}
}
func WithCustomPackages(customPackages []string) func(*PackageClient) {
return func(config *PackageClient) {
config.customPackages = customPackages
}
}
func WithCustomConfigs(customConfigs []string) func(*PackageClient) {
return func(config *PackageClient) {
config.customConfigs = customConfigs
}
}
| 233 |
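A minimal sketch of the PackageClient functional-options pattern, assuming a hand-built bundle; the package and cluster names are hypothetical, and no kubectl runner is needed for generation, so nil is passed for it.

package main

import (
	"fmt"

	packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

func main() {
	bundle := &packagesv1.PackageBundle{
		Spec: packagesv1.PackageBundleSpec{
			Packages: []packagesv1.BundlePackage{{Name: "harbor"}},
		},
	}
	// GeneratePackages only reads the bundle, so the kubectl runner can be nil here.
	pc := curatedpackages.NewPackageClient(
		nil,
		curatedpackages.WithBundle(bundle),
		curatedpackages.WithCustomPackages([]string{"harbor"}),
	)
	pkgs, err := pc.GeneratePackages("my-cluster")
	if err != nil {
		return
	}
	// Name is prefixed with "generated-", namespace is "eksa-packages-my-cluster".
	fmt.Println(pkgs[0].Name, pkgs[0].Namespace)
}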
eks-anywhere | aws | Go | package curatedpackages_test
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
)
type packageTest struct {
*WithT
ctx context.Context
kubectl *mocks.MockKubectlRunner
bundle *packagesv1.PackageBundle
command *curatedpackages.PackageClient
kubeConfig string
}
func newPackageTest(t *testing.T) *packageTest {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
return &packageTest{
WithT: NewWithT(t),
ctx: context.Background(),
bundle: &packagesv1.PackageBundle{
Spec: packagesv1.PackageBundleSpec{
Packages: []packagesv1.BundlePackage{
{
Name: "harbor-test",
Source: packagesv1.BundlePackageSource{
Versions: []packagesv1.SourceVersion{
{Name: "0.0.1"},
{Name: "0.0.2"},
},
},
},
{
Name: "redis-test",
Source: packagesv1.BundlePackageSource{
Versions: []packagesv1.SourceVersion{
{Name: "0.0.3"},
{Name: "0.0.4"},
},
},
},
},
},
},
kubectl: k,
kubeConfig: "kubeconfig.kubeconfig",
}
}
func TestGeneratePackagesSucceed(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
result, err := tt.command.GeneratePackages("billy")
tt.Expect(err).To(BeNil())
tt.Expect(result[0].Name).To(Equal(curatedpackages.CustomName + packages[0]))
}
func TestGeneratePackagesFail(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"unknown-package"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
result, err := tt.command.GeneratePackages("billy")
tt.Expect(err).NotTo(BeNil())
tt.Expect(result).To(BeNil())
}
func TestGetPackageFromBundleSucceeds(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
result, err := tt.command.GetPackageFromBundle(packages[0])
tt.Expect(err).To(BeNil())
tt.Expect(result.Name).To(Equal(packages[0]))
}
func TestGetPackageFromBundleFails(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
result, err := tt.command.GetPackageFromBundle("nonexisting")
tt.Expect(err).NotTo(BeNil())
tt.Expect(result).To(BeNil())
}
func TestInstallPackagesSucceeds(t *testing.T) {
tt := newPackageTest(t)
tt.kubectl.EXPECT().ExecuteFromYaml(tt.ctx, gomock.Any(), gomock.Any()).Return(convertJsonToBytes(tt.bundle.Spec.Packages[0]), nil)
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
// Suppress output temporarily since it is not needed for testing
temp := os.Stdout
os.Stdout = nil // turn it off
err := tt.command.InstallPackage(tt.ctx, &tt.bundle.Spec.Packages[0], "my-harbor", "billy", "")
os.Stdout = temp // restore it
tt.Expect(err).To(BeNil())
}
func TestInstallPackagesFails(t *testing.T) {
tt := newPackageTest(t)
tt.kubectl.EXPECT().ExecuteFromYaml(tt.ctx, gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, errors.New("error installing package. Package exists"))
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.InstallPackage(tt.ctx, &tt.bundle.Spec.Packages[0], "my-harbor", "billy", "")
tt.Expect(err).To(MatchError(ContainSubstring("error installing package. Package exists")))
}
func TestInstallPackagesFailsWhenInvalidConfigs(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
customConfigs := []string{"test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages), curatedpackages.WithCustomConfigs(customConfigs))
err := tt.command.InstallPackage(tt.ctx, &tt.bundle.Spec.Packages[0], "my-harbor", "billy", "")
tt.Expect(err).NotTo(BeNil())
}
func TestApplyPackagesPass(t *testing.T) {
tt := newPackageTest(t)
fileName := "test_file.yaml"
params := []string{"apply", "-f", fileName, "--kubeconfig", tt.kubeConfig}
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(convertJsonToBytes(tt.bundle.Spec.Packages[0]), nil)
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.ApplyPackages(tt.ctx, fileName, tt.kubeConfig)
tt.Expect(err).To(BeNil())
fmt.Println()
}
func TestApplyPackagesFail(t *testing.T) {
tt := newPackageTest(t)
fileName := "non_existing.yaml"
params := []string{"apply", "-f", fileName, "--kubeconfig", tt.kubeConfig}
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(bytes.Buffer{}, errors.New("file doesn't exist"))
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.ApplyPackages(tt.ctx, fileName, tt.kubeConfig)
tt.Expect(err).To(MatchError(ContainSubstring("file doesn't exist")))
}
func TestCreatePackagesPass(t *testing.T) {
tt := newPackageTest(t)
fileName := "test_file.yaml"
params := []string{"create", "-f", fileName, "--kubeconfig", tt.kubeConfig}
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(convertJsonToBytes(tt.bundle.Spec.Packages[0]), nil)
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.CreatePackages(tt.ctx, fileName, tt.kubeConfig)
fmt.Println()
tt.Expect(err).To(BeNil())
}
func TestCreatePackagesFail(t *testing.T) {
tt := newPackageTest(t)
fileName := "non_existing.yaml"
params := []string{"create", "-f", fileName, "--kubeconfig", tt.kubeConfig}
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(bytes.Buffer{}, errors.New("file doesn't exist"))
packages := []string{"harbor-test"}
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.CreatePackages(tt.ctx, fileName, tt.kubeConfig)
tt.Expect(err).To(MatchError(ContainSubstring("file doesn't exist")))
}
func TestDeletePackagesPass(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
args := []string{"harbor-test"}
params := []string{"delete", "packages", "--kubeconfig", tt.kubeConfig, "--namespace", constants.EksaPackagesName + "-susie"}
params = append(params, args...)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(convertJsonToBytes(tt.bundle.Spec.Packages[0]), nil)
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.DeletePackages(tt.ctx, args, tt.kubeConfig, "susie")
fmt.Println()
tt.Expect(err).To(BeNil())
}
func TestDeletePackagesFail(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
args := []string{"non-working-package"}
params := []string{"delete", "packages", "--kubeconfig", tt.kubeConfig, "--namespace", constants.EksaPackagesName + "-susie"}
params = append(params, args...)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(bytes.Buffer{}, errors.New("package doesn't exist"))
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.DeletePackages(tt.ctx, args, tt.kubeConfig, "susie")
tt.Expect(err).To(MatchError(ContainSubstring("package doesn't exist")))
}
func TestDescribePackagesPass(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
args := []string{"harbor-test"}
params := []string{"describe", "packages", "--kubeconfig", tt.kubeConfig, "--namespace", constants.EksaPackagesName + "-susie"}
params = append(params, args...)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(convertJsonToBytes(tt.bundle.Spec.Packages[0]), nil)
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.DescribePackages(tt.ctx, args, tt.kubeConfig, "susie")
fmt.Println()
tt.Expect(err).To(BeNil())
}
func TestDescribePackagesFail(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
args := []string{"non-working-package"}
params := []string{"describe", "packages", "--kubeconfig", tt.kubeConfig, "--namespace", constants.EksaPackagesName + "-susie"}
params = append(params, args...)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(bytes.Buffer{}, errors.New("package doesn't exist"))
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.DescribePackages(tt.ctx, args, tt.kubeConfig, "susie")
tt.Expect(err).To(MatchError(ContainSubstring("package doesn't exist")))
}
func TestDescribePackagesWhenEmptyResources(t *testing.T) {
tt := newPackageTest(t)
packages := []string{"harbor-test"}
var args []string
params := []string{"describe", "packages", "--kubeconfig", tt.kubeConfig, "--namespace", constants.EksaPackagesName + "-susie"}
params = append(params, args...)
tt.kubectl.EXPECT().ExecuteCommand(tt.ctx, params).Return(bytes.Buffer{}, nil)
tt.command = curatedpackages.NewPackageClient(tt.kubectl, curatedpackages.WithBundle(tt.bundle), curatedpackages.WithCustomPackages(packages))
err := tt.command.DescribePackages(tt.ctx, args, tt.kubeConfig, "susie")
tt.Expect(err).To(MatchError(ContainSubstring("no resources found")))
}
func TestDisplayPackages(t *testing.T) {
tt := newPackageTest(t)
bundle := curatedpackages.WithBundle(tt.bundle)
pc := curatedpackages.NewPackageClient(nil, bundle)
buf := &bytes.Buffer{}
err := pc.DisplayPackages(buf)
tt.Expect(err).To(BeNil())
// The expected string needs to have whitespace at the end of the strings,
// which some editors will remove by default, so it's probably best to use
// this format, even though it's a little harder to read for humans.
expected := "Package\t\tVersion(s)\t\n-------\t\t----------\t\nharbor-test\t0.0.1, 0.0.2\t\nredis-test\t0.0.3, 0.0.4\t\n"
tt.Expect(buf.String()).To(Equal(expected))
}
| 273 |
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
_ "embed"
"encoding/base64"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/templater"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
//go:embed config/secrets.yaml
var secretsValueYaml string
const (
eksaDefaultRegion = "us-west-2"
valueFileName = "values.yaml"
)
type PackageControllerClientOpt func(client *PackageControllerClient)
type PackageControllerClient struct {
kubeConfig string
chart *releasev1.Image
// chartManager installs and deletes helm charts.
chartManager ChartManager
clusterName string
clusterSpec *v1alpha1.ClusterSpec
managementClusterName string
kubectl KubectlRunner
eksaAccessKeyID string
eksaSecretAccessKey string
eksaRegion string
httpProxy string
httpsProxy string
noProxy []string
registryMirror *registrymirror.RegistryMirror
// activeBundleTimeout is the timeout to activate a bundle on installation.
activeBundleTimeout time.Duration
valuesFileWriter filewriter.FileWriter
// skipWaitForPackageBundle indicates whether the installer should wait
// until a package bundle is activated.
//
// Skipping the wait is desirable for full cluster lifecycle use cases,
// where resource creation and error reporting are asynchronous in nature.
skipWaitForPackageBundle bool
// clientBuilder creates k8s clients for workload clusters managed via full
// cluster lifecycle API.
clientBuilder ClientBuilder
// mu provides some thread-safety.
mu sync.Mutex
}
// ClientBuilder returns a k8s client for the specified cluster.
type ClientBuilder interface {
GetClient(context.Context, types.NamespacedName) (client.Client, error)
}
type ChartInstaller interface {
InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error
}
// ChartUninstaller handles deleting chart installations.
type ChartUninstaller interface {
Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error
}
// ChartManager installs and uninstalls helm charts.
type ChartManager interface {
ChartInstaller
ChartUninstaller
}
// NewPackageControllerClientFullLifecycle creates a PackageControllerClient
// for the Full Cluster Lifecycle controller.
//
// It differs because the CLI use case has far more information available at
// instantiation, while the FCL use case has less information at
// instantiation, and the rest when cluster creation is triggered.
func NewPackageControllerClientFullLifecycle(logger logr.Logger, chartManager ChartManager, kubectl KubectlRunner, clientBuilder ClientBuilder) *PackageControllerClient {
return &PackageControllerClient{
chartManager: chartManager,
kubectl: kubectl,
skipWaitForPackageBundle: true,
eksaRegion: eksaDefaultRegion,
clientBuilder: clientBuilder,
}
}
// EnableFullLifecycle wraps Enable to handle run-time arguments.
//
// This method fills in the gaps between the original CLI use case, where all
// information is known at PackageControllerClient initialization, and the
// Full Cluster Lifecycle use case, where there's limited information at
// initialization. Basically any parameter here isn't known at instantiation
// of the PackageControllerClient during full cluster lifecycle usage, hence
// why this method exists.
func (pc *PackageControllerClient) EnableFullLifecycle(ctx context.Context, log logr.Logger, clusterName, kubeConfig string, chart *releasev1.Image, registryMirror *registrymirror.RegistryMirror, options ...PackageControllerClientOpt) (err error) {
log.V(6).Info("enabling curated packages full lifecycle")
defer func(err *error) {
if err != nil && *err != nil {
log.Error(*err, "Enabling curated packages full lifecycle", "clusterName", clusterName)
} else {
log.Info("Successfully enabled curated packages full lifecycle")
}
}(&err)
pc.mu.Lock()
// This anonymous function ensures that the pc.mu is unlocked before
// Enable is called, preventing deadlocks in the event that Enable tries
// to acquire pc.mu.
err = func() error {
defer pc.mu.Unlock()
pc.skipWaitForPackageBundle = true
pc.clusterName = clusterName
pc.kubeConfig = kubeConfig
pc.chart = chart
pc.registryMirror = registryMirror
writer, err := filewriter.NewWriter(clusterName)
if err != nil {
return fmt.Errorf("creating file writer for helm values: %w", err)
}
options = append(options, WithValuesFileWriter(writer))
for _, o := range options {
o(pc)
}
return nil
}()
if err != nil {
return err
}
return pc.Enable(ctx)
}
// NewPackageControllerClient instantiates a new instance of PackageControllerClient.
func NewPackageControllerClient(chartManager ChartManager, kubectl KubectlRunner, clusterName, kubeConfig string, chart *releasev1.Image, registryMirror *registrymirror.RegistryMirror, options ...PackageControllerClientOpt) *PackageControllerClient {
pcc := &PackageControllerClient{
kubeConfig: kubeConfig,
clusterName: clusterName,
chart: chart,
chartManager: chartManager,
kubectl: kubectl,
registryMirror: registryMirror,
eksaRegion: eksaDefaultRegion,
}
for _, o := range options {
o(pcc)
}
return pcc
}
// Enable enables curated packages in a cluster.
//
// In case the cluster is a management cluster, it performs the following actions:
// - Installation of Package Controller through helm chart installation
// - Creation of secret credentials
// - Creation of a single run of a cron job refresher
// - Activation of a curated packages bundle
//
// In case the cluster is a workload cluster, it performs the following actions:
// - Creation of package bundle controller (PBC) custom resource in management cluster
func (pc *PackageControllerClient) Enable(ctx context.Context) error {
ociURI := fmt.Sprintf("%s%s", "oci://", pc.registryMirror.ReplaceRegistry(pc.chart.Image()))
clusterName := fmt.Sprintf("clusterName=%s", pc.clusterName)
sourceRegistry, defaultRegistry, defaultImageRegistry := pc.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
// Provide proxy details for curated packages helm chart when proxy details provided
if pc.httpProxy != "" {
httpProxy := fmt.Sprintf("proxy.HTTP_PROXY=%s", pc.httpProxy)
httpsProxy := fmt.Sprintf("proxy.HTTPS_PROXY=%s", pc.httpsProxy)
// Helm requires commas to be escaped: https://github.com/rancher/rancher/issues/16195
noProxy := fmt.Sprintf("proxy.NO_PROXY=%s", strings.Join(pc.noProxy, "\\,"))
values = append(values, httpProxy, httpsProxy, noProxy)
}
if (pc.eksaSecretAccessKey == "" || pc.eksaAccessKeyID == "") && pc.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
var err error
var valueFilePath string
if valueFilePath, _, err = pc.CreateHelmOverrideValuesYaml(); err != nil {
return err
}
skipCRDs := false
chartName := pc.chart.Name
if pc.managementClusterName != pc.clusterName {
values = append(values, "workloadPackageOnly=true")
values = append(values, "managementClusterName="+pc.managementClusterName)
chartName = chartName + "-" + pc.clusterName
skipCRDs = true
}
if err := pc.chartManager.InstallChart(ctx, chartName, ociURI, pc.chart.Tag(), pc.kubeConfig, constants.EksaPackagesName, valueFilePath, skipCRDs, values); err != nil {
return err
}
if !pc.skipWaitForPackageBundle {
return pc.waitForActiveBundle(ctx)
}
return nil
}
// GetCuratedPackagesRegistries gets value for configurable registries from PBC.
func (pc *PackageControllerClient) GetCuratedPackagesRegistries() (sourceRegistry, defaultRegistry, defaultImageRegistry string) {
sourceRegistry = publicProdECR
defaultImageRegistry = packageProdDomain
accountName := prodAccount
if strings.Contains(pc.chart.Image(), devAccount) {
accountName = devAccount
defaultImageRegistry = packageDevDomain
sourceRegistry = publicDevECR
}
if strings.Contains(pc.chart.Image(), stagingAccount) {
accountName = stagingAccount
defaultImageRegistry = packageProdDomain
sourceRegistry = stagingDevECR
}
defaultRegistry = sourceRegistry
if pc.registryMirror != nil {
// account is added as part of registry name in package controller helm chart
// https://github.com/aws/eks-anywhere-packages/blob/main/charts/eks-anywhere-packages/values.yaml#L15-L18
sourceRegistry = fmt.Sprintf("%s/%s", pc.registryMirror.CoreEKSAMirror(), accountName)
defaultRegistry = fmt.Sprintf("%s/%s", pc.registryMirror.CoreEKSAMirror(), accountName)
if gatedOCINamespace := pc.registryMirror.CuratedPackagesMirror(); gatedOCINamespace != "" {
defaultImageRegistry = gatedOCINamespace
}
} else {
if pc.eksaRegion != eksaDefaultRegion {
defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, eksaDefaultRegion, pc.eksaRegion)
}
}
return sourceRegistry, defaultRegistry, defaultImageRegistry
}
// CreateHelmOverrideValuesYaml creates a temp file to override certain values in package controller helm install.
func (pc *PackageControllerClient) CreateHelmOverrideValuesYaml() (string, []byte, error) {
content, err := pc.generateHelmOverrideValues()
if err != nil {
return "", nil, err
}
if pc.valuesFileWriter == nil {
return "", content, fmt.Errorf("valuesFileWriter is nil")
}
filePath, err := pc.valuesFileWriter.Write(valueFileName, content)
if err != nil {
return "", content, err
}
return filePath, content, nil
}
func (pc *PackageControllerClient) generateHelmOverrideValues() ([]byte, error) {
var err error
endpoint, username, password, caCertContent, insecureSkipVerify := "", "", "", "", "false"
if pc.registryMirror != nil {
endpoint = pc.registryMirror.BaseRegistry
username, password, err = config.ReadCredentials()
if err != nil {
return []byte{}, err
}
caCertContent = pc.registryMirror.CACertContent
if pc.registryMirror.InsecureSkipVerify {
insecureSkipVerify = "true"
}
}
templateValues := map[string]interface{}{
"eksaAccessKeyId": base64.StdEncoding.EncodeToString([]byte(pc.eksaAccessKeyID)),
"eksaSecretAccessKey": base64.StdEncoding.EncodeToString([]byte(pc.eksaSecretAccessKey)),
"eksaRegion": base64.StdEncoding.EncodeToString([]byte(pc.eksaRegion)),
"mirrorEndpoint": base64.StdEncoding.EncodeToString([]byte(endpoint)),
"mirrorUsername": base64.StdEncoding.EncodeToString([]byte(username)),
"mirrorPassword": base64.StdEncoding.EncodeToString([]byte(password)),
"mirrorCACertContent": base64.StdEncoding.EncodeToString([]byte(caCertContent)),
"insecureSkipVerify": base64.StdEncoding.EncodeToString([]byte(insecureSkipVerify)),
}
result, err := templater.Execute(secretsValueYaml, templateValues)
if err != nil {
return []byte{}, err
}
values, err := pc.GetPackageControllerConfiguration()
return []byte(values + string(result)), err
}
// packageBundleControllerResource is the name of the package bundle controller
// resource in the API.
const packageBundleControllerResource string = "packageBundleController"
// waitForActiveBundle polls the package bundle controller for its active bundle.
//
// It returns nil on success. Success is defined as receiving a valid package
// bundle controller from the API with a non-empty active bundle.
//
// If no timeout is specified, a default of 3 minutes is used.
func (pc *PackageControllerClient) waitForActiveBundle(ctx context.Context) error {
timeout := 3 * time.Minute
if pc.activeBundleTimeout > 0 {
timeout = pc.activeBundleTimeout
}
timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
targetNs := constants.EksaPackagesName + "-" + pc.clusterName
done := make(chan error)
go func() {
defer close(done)
pbc := &packagesv1.PackageBundleController{}
for {
readyCnt := 0
err := pc.kubectl.GetObject(timeoutCtx, packageBundleControllerResource, pc.clusterName,
packagesv1.PackageNamespace, pc.kubeConfig, pbc)
if err != nil && !apierrors.IsNotFound(err) {
done <- fmt.Errorf("getting package bundle controller: %w", err)
return
}
if pbc != nil && pbc.Spec.ActiveBundle != "" {
logger.V(6).Info("found packages bundle controller active bundle",
"name", pbc.Spec.ActiveBundle)
readyCnt++
} else {
logger.V(6).Info("waiting for package bundle controller to activate a bundle",
"clusterName", pc.clusterName)
}
found, _ := pc.kubectl.HasResource(timeoutCtx, "namespace", targetNs, pc.kubeConfig, "default")
if found {
logger.V(6).Info("found namespace", "namespace", targetNs)
readyCnt++
} else {
logger.V(6).Info("waiting for namespace", "namespace", targetNs)
}
if readyCnt == 2 {
return
}
// TODO read a polling interval value from the context, falling
// back to this as a default.
time.Sleep(time.Second)
}
}()
select {
case <-timeoutCtx.Done():
return fmt.Errorf("timed out finding an active package bundle / %s namespace for the current cluster: %v", targetNs, timeoutCtx.Err())
case err := <-done:
if err != nil {
return fmt.Errorf("couldn't find an active package bundle for the current cluster: %v", err)
}
return nil
}
}
// IsInstalled checks if a package controller custom resource exists.
func (pc *PackageControllerClient) IsInstalled(ctx context.Context) bool {
hasResource, err := pc.kubectl.HasResource(ctx, packageBundleControllerResource, pc.clusterName, pc.kubeConfig, constants.EksaPackagesName)
return hasResource && err == nil
}
func formatYamlLine(space, key, value string) string {
if value == "" {
return ""
}
return space + key + ": " + value + "\n"
}
func formatImageResource(resource *v1alpha1.ImageResource, name string) (result string) {
if resource.CPU != "" || resource.Memory != "" {
result = " " + name + ":\n"
result += formatYamlLine(" ", "cpu", resource.CPU)
result += formatYamlLine(" ", "memory", resource.Memory)
}
return result
}
func formatCronJob(cronJob *v1alpha1.PackageControllerCronJob) (result string) {
if cronJob != nil {
result += "cronjob:\n"
result += formatYamlLine(" ", "digest", cronJob.Digest)
result += formatYamlLine(" ", "repository", cronJob.Repository)
result += formatYamlLine(" ", "suspend", strconv.FormatBool(cronJob.Disable))
result += formatYamlLine(" ", "tag", cronJob.Tag)
}
return result
}
func formatResources(resources *v1alpha1.PackageControllerResources) (result string) {
if resources.Limits.CPU != "" || resources.Limits.Memory != "" ||
resources.Requests.CPU != "" || resources.Requests.Memory != "" {
result += " resources:\n"
result += formatImageResource(&resources.Limits, "limits")
result += formatImageResource(&resources.Requests, "requests")
}
return result
}
// GetPackageControllerConfiguration returns the package controller helm override values built from the cluster spec's Packages configuration.
func (pc *PackageControllerClient) GetPackageControllerConfiguration() (result string, err error) {
clusterSpec := pc.clusterSpec
if clusterSpec == nil || clusterSpec.Packages == nil {
return "", nil
}
if clusterSpec.Packages.Controller != nil {
result += "controller:\n"
result += formatYamlLine(" ", "digest", clusterSpec.Packages.Controller.Digest)
result += formatYamlLine(" ", "enableWebhooks", strconv.FormatBool(!clusterSpec.Packages.Controller.DisableWebhooks))
result += formatYamlLine(" ", "repository", clusterSpec.Packages.Controller.Repository)
result += formatYamlLine(" ", "tag", clusterSpec.Packages.Controller.Tag)
result += formatResources(&clusterSpec.Packages.Controller.Resources)
if len(clusterSpec.Packages.Controller.Env) > 0 {
result += " env:\n"
for _, kvp := range clusterSpec.Packages.Controller.Env {
results := strings.SplitN(kvp, "=", 2)
if len(results) != 2 {
err = fmt.Errorf("invalid environment in specification <%s>", kvp)
continue
}
result += " - name: " + results[0] + "\n"
result += " value: " + results[1] + "\n"
}
}
}
result += formatCronJob(clusterSpec.Packages.CronJob)
return result, err
}
// Reconcile installs resources when a full cluster lifecycle cluster is created.
func (pc *PackageControllerClient) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, cluster *anywherev1.Cluster) error {
image, err := pc.getBundleFromCluster(ctx, client, cluster)
if err != nil {
return err
}
registry := registrymirror.FromCluster(cluster)
// No Kubeconfig is passed. This is intentional. The helm executable will
// get that configuration from its environment.
if err := pc.EnableFullLifecycle(ctx, logger, cluster.Name, "", image, registry,
WithManagementClusterName(cluster.ManagedBy())); err != nil {
return fmt.Errorf("packages client error: %w", err)
}
return nil
}
// getBundleFromCluster retrieves the package controller helm chart image from the cluster's referenced Bundles resource, based on the cluster's k8s version.
func (pc *PackageControllerClient) getBundleFromCluster(ctx context.Context, client client.Client, clusterObj *anywherev1.Cluster) (*releasev1.Image, error) {
bundles := &releasev1.Bundles{}
nn := types.NamespacedName{
Name: clusterObj.Spec.BundlesRef.Name,
Namespace: clusterObj.Spec.BundlesRef.Namespace,
}
if err := client.Get(ctx, nn, bundles); err != nil {
return nil, fmt.Errorf("retrieving bundle: %w", err)
}
verBundle, err := cluster.GetVersionsBundle(clusterObj, bundles)
if err != nil {
return nil, err
}
return &verBundle.PackageController.HelmChart, nil
}
// KubeDeleter abstracts client.Client so mocks can be substituted in tests.
type KubeDeleter interface {
Delete(context.Context, client.Object, ...client.DeleteOption) error
}
// ReconcileDelete removes resources after a full cluster lifecycle cluster is
// deleted.
func (pc *PackageControllerClient) ReconcileDelete(ctx context.Context, logger logr.Logger, client KubeDeleter, cluster *anywherev1.Cluster) error {
namespace := "eksa-packages-" + cluster.Name
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
if err := client.Delete(ctx, ns); err != nil {
if !apierrors.IsNotFound(err) {
return fmt.Errorf("deleting workload cluster curated packages namespace %q %w", namespace, err)
}
logger.V(6).Info("not found", "namespace", namespace)
}
name := "eks-anywhere-packages-" + pc.clusterName
if err := pc.chartManager.Delete(ctx, pc.kubeConfig, name, constants.EksaPackagesName); err != nil {
if !strings.Contains(err.Error(), "release: not found") {
return err
}
logger.V(6).Info("not found", "release", name)
}
logger.Info("Removed curated packages installation", "clusterName")
return nil
}
func WithEksaAccessKeyId(eksaAccessKeyId string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.eksaAccessKeyID = eksaAccessKeyId
}
}
func WithActiveBundleTimeout(timeout time.Duration) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.activeBundleTimeout = timeout
}
}
func WithEksaSecretAccessKey(eksaSecretAccessKey string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.eksaSecretAccessKey = eksaSecretAccessKey
}
}
func WithEksaRegion(eksaRegion string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
if eksaRegion != "" {
config.eksaRegion = eksaRegion
}
}
}
func WithHTTPProxy(httpProxy string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.httpProxy = httpProxy
}
}
func WithHTTPSProxy(httpsProxy string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.httpsProxy = httpsProxy
}
}
func WithNoProxy(noProxy []string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
if noProxy != nil {
config.noProxy = noProxy
}
}
}
func WithManagementClusterName(managementClusterName string) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.managementClusterName = managementClusterName
}
}
// WithValuesFileWriter sets up a writer to generate temporary values.yaml to
// override some values in package controller helm chart.
func WithValuesFileWriter(writer filewriter.FileWriter) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.valuesFileWriter = writer
}
}
// WithClusterSpec sets the cluster spec.
func WithClusterSpec(clusterSpec *cluster.Spec) func(client *PackageControllerClient) {
return func(config *PackageControllerClient) {
config.clusterSpec = &clusterSpec.Cluster.Spec
}
}
| 596 |
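A construction sketch for PackageControllerClient illustrating the functional options and GetCuratedPackagesRegistries; the chart URI, kubeconfig path, region, and cluster name are hypothetical, and the chart manager and kubectl runner are left nil, so this sketch must not call Enable.

package main

import (
	"fmt"
	"time"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

func main() {
	chart := &releasev1.Image{
		Name: "eks-anywhere-packages",
		URI:  "public.ecr.aws/eks-anywhere/eks-anywhere-packages:v1",
	}
	pcc := curatedpackages.NewPackageControllerClient(
		nil, nil, // chart manager and kubectl runner omitted in this sketch
		"my-cluster", "my-cluster.kubeconfig", chart,
		nil, // no registry mirror
		curatedpackages.WithEksaRegion("us-east-1"),
		curatedpackages.WithActiveBundleTimeout(5*time.Minute),
		curatedpackages.WithManagementClusterName("my-cluster"),
	)
	// With no registry mirror, the source and default registries come from the
	// chart URI, and the image registry is adjusted for the configured region.
	src, def, img := pcc.GetCuratedPackagesRegistries()
	fmt.Println(src, def, img)
}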
eks-anywhere | aws | Go | package curatedpackages_test
import (
"context"
_ "embed"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/go-logr/logr/testr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
"github.com/aws/eks-anywhere/pkg/filewriter"
writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/registrymirror"
artifactsv1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const valueFileName = "values.yaml"
//go:embed testdata/expected_all_values.yaml
var expectedAllValues string
type packageControllerTest struct {
*WithT
ctx context.Context
kubectl *mocks.MockKubectlRunner
chartManager *mocks.MockChartManager
command *curatedpackages.PackageControllerClient
clusterName string
kubeConfig string
chart *artifactsv1.Image
eksaAccessID string
eksaAccessKey string
eksaRegion string
httpProxy string
httpsProxy string
noProxy []string
registryMirror *registrymirror.RegistryMirror
writer filewriter.FileWriter
wantValueFile string
}
func newPackageControllerTests(t *testing.T) []*packageControllerTest {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
kubeConfig := "kubeconfig.kubeconfig"
chart := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
chartDev := &artifactsv1.Image{
Name: "test_controller",
URI: "public.ecr.aws/l0g8r8j6/eks-anywhere-packages:v1",
}
chartStaging := &artifactsv1.Image{
Name: "test_controller",
URI: "public.ecr.aws/w9m0f3l5/eks-anywhere-packages:v1",
}
eksaAccessId := "test-access-id"
eksaAccessKey := "test-access-key"
eksaRegion := "test-region"
clusterName := "billy"
registryMirror := ®istrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/public",
constants.DefaultCuratedPackagesRegistryRegex: "1.2.3.4:443/private",
},
Auth: true,
CACertContent: "-----BEGIN CERTIFICATE-----\nabc\nefg\n-----END CERTIFICATE-----\n",
InsecureSkipVerify: false,
}
registryMirrorInsecure := ®istrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:8443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/public",
constants.DefaultCuratedPackagesRegistryRegex: "1.2.3.4:443/private",
},
Auth: true,
CACertContent: "-----BEGIN CERTIFICATE-----\nabc\nefg\n-----END CERTIFICATE-----\n",
InsecureSkipVerify: true,
}
writer, _ := filewriter.NewWriter(clusterName)
clusterSpec := &cluster.Spec{
Config: &cluster.Config{
Cluster: &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{},
},
},
}
return []*packageControllerTest{
{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chart, registryMirror,
curatedpackages.WithEksaSecretAccessKey(eksaAccessKey),
curatedpackages.WithEksaRegion(eksaRegion),
curatedpackages.WithEksaAccessKeyId(eksaAccessId),
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
curatedpackages.WithClusterSpec(clusterSpec),
),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chart,
eksaAccessID: eksaAccessId,
eksaAccessKey: eksaAccessKey,
eksaRegion: eksaRegion,
httpProxy: "1.1.1.1",
httpsProxy: "1.1.1.1",
noProxy: []string{"1.1.1.1/24"},
registryMirror: registryMirror,
writer: writer,
wantValueFile: "testdata/values_test.yaml",
},
{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chart,
nil,
curatedpackages.WithEksaSecretAccessKey(eksaAccessKey),
curatedpackages.WithEksaRegion(eksaRegion),
curatedpackages.WithEksaAccessKeyId(eksaAccessId),
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chart,
eksaAccessID: eksaAccessId,
eksaAccessKey: eksaAccessKey,
eksaRegion: eksaRegion,
httpProxy: "1.1.1.1",
httpsProxy: "1.1.1.1",
noProxy: []string{"1.1.1.1/24"},
registryMirror: nil,
writer: writer,
wantValueFile: "testdata/values_empty_registrymirrorsecret.yaml",
},
{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chartDev,
nil,
curatedpackages.WithEksaSecretAccessKey(eksaAccessKey),
curatedpackages.WithEksaRegion(eksaRegion),
curatedpackages.WithEksaAccessKeyId(eksaAccessId),
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chartDev,
eksaAccessID: eksaAccessId,
eksaAccessKey: eksaAccessKey,
eksaRegion: eksaRegion,
httpProxy: "1.1.1.1",
httpsProxy: "1.1.1.1",
noProxy: []string{"1.1.1.1/24"},
registryMirror: nil,
writer: writer,
wantValueFile: "testdata/values_empty_registrymirrorsecret.yaml",
},
{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chartStaging,
nil,
curatedpackages.WithEksaSecretAccessKey(eksaAccessKey),
curatedpackages.WithEksaRegion(eksaRegion),
curatedpackages.WithEksaAccessKeyId(eksaAccessId),
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chartStaging,
eksaAccessID: eksaAccessId,
eksaAccessKey: eksaAccessKey,
eksaRegion: eksaRegion,
httpProxy: "1.1.1.1",
httpsProxy: "1.1.1.1",
noProxy: []string{"1.1.1.1/24"},
registryMirror: nil,
writer: writer,
wantValueFile: "testdata/values_empty_registrymirrorsecret.yaml",
},
{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chart, registryMirrorInsecure,
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chart,
eksaAccessID: "",
eksaAccessKey: "",
eksaRegion: "",
httpProxy: "1.1.1.1",
httpsProxy: "1.1.1.1",
noProxy: []string{"1.1.1.1/24"},
registryMirror: registryMirrorInsecure,
writer: writer,
wantValueFile: "testdata/values_empty_awssecret.yaml",
},
{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chart, nil,
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chart,
eksaAccessID: "",
eksaAccessKey: "",
eksaRegion: "",
httpProxy: "1.1.1.1",
httpsProxy: "1.1.1.1",
noProxy: []string{"1.1.1.1/24"},
registryMirror: nil,
writer: writer,
wantValueFile: "testdata/values_empty.yaml",
},
}
}
func TestEnableSuccess(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err != nil {
t.Errorf("Install Controller Should succeed when installation passes")
}
}
}
func TestEnableSucceedInWorkloadCluster(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, tt.clusterName, tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion("us-west-2"),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithManagementClusterName("mgmt"),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
values = append(values, "managementClusterName=mgmt")
values = append(values, "workloadPackageOnly=true")
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name+"-billy", ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, true, gomock.InAnyOrder(values)).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
tt.Expect(err).To(BeNil())
}
}
func TestEnableSucceedInWorkloadClusterWhenPackageBundleControllerNotExist(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, tt.clusterName, tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion("us-west-2"),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithManagementClusterName("mgmt"),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
values = append(values, "managementClusterName=mgmt")
values = append(values, "workloadPackageOnly=true")
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name+"-billy", ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, true, gomock.InAnyOrder(values)).Return(nil)
gomock.InOrder(
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCNotFound(t)),
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)))
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
tt.Expect(err).To(BeNil())
}
}
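// getPBCSuccess returns a GetObject stub that fills the target object with a PackageBundleController that already has an active bundle.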
func getPBCSuccess(t *testing.T) func(context.Context, string, string, string, string, *packagesv1.PackageBundleController) error {
return func(_ context.Context, _, _, _, _ string, obj *packagesv1.PackageBundleController) error {
pbc := &packagesv1.PackageBundleController{
Spec: packagesv1.PackageBundleControllerSpec{
ActiveBundle: "test-bundle",
},
}
pbc.DeepCopyInto(obj)
return nil
}
}
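// getPBCNotFound returns a GetObject stub that reports the PackageBundleController as not found.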
func getPBCNotFound(t *testing.T) func(context.Context, string, string, string, string, *packagesv1.PackageBundleController) error {
return func(_ context.Context, _, _, _, _ string, obj *packagesv1.PackageBundleController) error {
return apierrors.NewNotFound(schema.GroupResource{
Group: "test group",
Resource: "test resource",
}, "test")
}
}
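// getPBCFail returns a GetObject stub that always fails with a generic error.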
func getPBCFail(t *testing.T) func(context.Context, string, string, string, string, *packagesv1.PackageBundleController) error {
return func(_ context.Context, _, _, _, _ string, obj *packagesv1.PackageBundleController) error {
return fmt.Errorf("test error")
}
}
func TestEnableWithProxy(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion(tt.eksaRegion),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithHTTPProxy(tt.httpProxy),
curatedpackages.WithHTTPSProxy(tt.httpsProxy),
curatedpackages.WithNoProxy(tt.noProxy),
curatedpackages.WithManagementClusterName(tt.clusterName),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
httpProxy := fmt.Sprintf("proxy.HTTP_PROXY=%s", tt.httpProxy)
httpsProxy := fmt.Sprintf("proxy.HTTPS_PROXY=%s", tt.httpsProxy)
noProxy := fmt.Sprintf("proxy.NO_PROXY=%s", strings.Join(tt.noProxy, "\\,"))
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
} else {
if tt.eksaRegion == "" {
tt.eksaRegion = "us-west-2"
}
defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion)
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName, httpProxy, httpsProxy, noProxy}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err != nil {
t.Errorf("Install Controller Should succeed when installation passes")
}
}
}
func TestEnableWithEmptyProxy(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion(tt.eksaRegion),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithHTTPProxy(""),
curatedpackages.WithHTTPSProxy(""),
curatedpackages.WithNoProxy(nil),
curatedpackages.WithManagementClusterName(tt.clusterName),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
} else {
if tt.eksaRegion == "" {
tt.eksaRegion = "us-west-2"
}
defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion)
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err != nil {
t.Errorf("Install Controller Should succeed when installation passes")
}
}
}
func TestEnableFail(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(errors.New("login failed"))
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err == nil {
t.Errorf("Install Controller Should fail when installation fails")
}
}
}
func TestEnableFailNoActiveBundle(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCFail(t)).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err == nil {
t.Errorf("expected error, got nil")
}
}
}
func TestEnableSuccessWhenCronJobFails(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err != nil {
t.Errorf("Install Controller Should succeed when cron job fails")
}
}
}
func TestIsInstalledTrue(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.kubectl.EXPECT().HasResource(tt.ctx, "packageBundleController", tt.clusterName, tt.kubeConfig, constants.EksaPackagesName).Return(true, nil)
found := tt.command.IsInstalled(tt.ctx)
if !found {
t.Errorf("expected true, got %t", found)
}
}
}
func TestIsInstalledFalse(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.kubectl.EXPECT().HasResource(tt.ctx, "packageBundleController", tt.clusterName, tt.kubeConfig, constants.EksaPackagesName).
Return(false, errors.New("controller doesn't exist"))
found := tt.command.IsInstalled(tt.ctx)
if found {
t.Errorf("expected false, got %t", found)
}
}
}
func TestEnableActiveBundleCustomTimeout(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion(tt.eksaRegion),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithActiveBundleTimeout(time.Second),
curatedpackages.WithManagementClusterName(tt.clusterName),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
} else {
if tt.eksaRegion == "" {
tt.eksaRegion = "us-west-2"
}
defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion)
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err != nil {
t.Errorf("Install Controller Should succeed when installation passes")
}
}
}
func TestEnableActiveBundleWaitLoops(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCLoops(t, 3)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
if err != nil {
t.Errorf("expected no error, got %v", err)
}
}
}
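// getPBCLoops returns a GetObject stub that leaves the PackageBundleController without an active bundle for the first loops-1 calls and only then fills in an active bundle, exercising the wait loop.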
func getPBCLoops(t *testing.T, loops int) func(context.Context, string, string, string, string, *packagesv1.PackageBundleController) error {
return func(_ context.Context, _, _, _, _ string, obj *packagesv1.PackageBundleController) error {
loops = loops - 1
if loops > 0 {
return nil
}
pbc := &packagesv1.PackageBundleController{
Spec: packagesv1.PackageBundleControllerSpec{
ActiveBundle: "test-bundle",
},
}
pbc.DeepCopyInto(obj)
return nil
}
}
func TestEnableActiveBundleTimesOut(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion(tt.eksaRegion),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithActiveBundleTimeout(time.Millisecond),
curatedpackages.WithManagementClusterName(tt.clusterName),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
} else {
if tt.eksaRegion == "" {
tt.eksaRegion = "us-west-2"
}
defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion)
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCDelay(t, time.Second)).
AnyTimes()
err := tt.command.Enable(tt.ctx)
expectedErr := fmt.Errorf("timed out finding an active package bundle / eksa-packages-billy namespace for the current cluster: %v", context.DeadlineExceeded)
if err.Error() != expectedErr.Error() {
t.Errorf("expected %v, got %v", expectedErr, err)
}
}
}
func TestEnableActiveBundleNamespaceTimesOut(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion(tt.eksaRegion),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithActiveBundleTimeout(time.Millisecond),
curatedpackages.WithManagementClusterName(tt.clusterName),
curatedpackages.WithValuesFileWriter(tt.writer),
)
clusterName := fmt.Sprintf("clusterName=%s", "billy")
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries()
sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry)
defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry)
defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
} else {
if tt.eksaRegion == "" {
tt.eksaRegion = "us-west-2"
}
defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion)
}
values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName}
if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil {
values = append(values, "cronjob.suspend=true")
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return false, nil }).
AnyTimes()
err := tt.command.Enable(tt.ctx)
expectedErr := fmt.Errorf("timed out finding an active package bundle / eksa-packages-billy namespace for the current cluster: %v", context.DeadlineExceeded)
if err.Error() != expectedErr.Error() {
t.Errorf("expected %v, got %v", expectedErr, err)
}
}
}
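// getPBCDelay returns a GetObject stub that sleeps for the given delay before failing, used to trigger the active bundle timeout.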
func getPBCDelay(t *testing.T, delay time.Duration) func(context.Context, string, string, string, string, *packagesv1.PackageBundleController) error {
return func(_ context.Context, _, _, _, _ string, obj *packagesv1.PackageBundleController) error {
time.Sleep(delay)
return fmt.Errorf("test error")
}
}
func TestCreateHelmOverrideValuesYaml(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
filePath, content, err := tt.command.CreateHelmOverrideValuesYaml()
tt.Expect(err).To(BeNil())
tt.Expect(filePath).To(Equal(filepath.Join(tt.clusterName, filewriter.DefaultTmpFolder, "values.yaml")))
test.AssertContentToFile(t, string(content), tt.wantValueFile)
}
}
func TestCreateHelmOverrideValuesYamlFail(t *testing.T) {
_ = os.Unsetenv("REGISTRY_USERNAME")
_ = os.Unsetenv("REGISTRY_PASSWORD")
for _, tt := range newPackageControllerTests(t) {
filePath, content, err := tt.command.CreateHelmOverrideValuesYaml()
if tt.registryMirror != nil {
tt.Expect(err).NotTo(BeNil())
tt.Expect(filePath).To(Equal(""))
} else {
tt.Expect(err).To(BeNil())
tt.Expect(filePath).To(Equal(filepath.Join(tt.clusterName, filewriter.DefaultTmpFolder, "values.yaml")))
test.AssertContentToFile(t, string(content), tt.wantValueFile)
}
}
}
func TestCreateHelmOverrideValuesYamlFailWithNoWriter(t *testing.T) {
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey),
curatedpackages.WithEksaRegion(tt.eksaRegion),
curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID),
curatedpackages.WithActiveBundleTimeout(time.Second),
curatedpackages.WithManagementClusterName(tt.clusterName),
)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
err := tt.command.Enable(tt.ctx)
expectedErr := fmt.Errorf("valuesFileWriter is nil")
if err.Error() != expectedErr.Error() {
t.Errorf("expected %v, got %v", expectedErr, err)
}
}
}
func TestCreateHelmOverrideValuesYamlFailWithWriteError(t *testing.T) {
ctrl := gomock.NewController(t)
writer := writermocks.NewMockFileWriter(ctrl)
for _, tt := range newPackageControllerTests(t) {
tt.command = curatedpackages.NewPackageControllerClient(
tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart,
tt.registryMirror,
curatedpackages.WithValuesFileWriter(writer),
)
if tt.registryMirror != nil {
t.Setenv("REGISTRY_USERNAME", "username")
t.Setenv("REGISTRY_PASSWORD", "password")
}
writer.EXPECT().Write(gomock.Any(), gomock.Any()).Return("", errors.New("writer errors out"))
filePath, content, err := tt.command.CreateHelmOverrideValuesYaml()
tt.Expect(filePath).To(Equal(""))
tt.Expect(content).NotTo(BeNil())
tt.Expect(err).NotTo(BeNil())
}
}
func TestGetPackageControllerConfigurationNil(t *testing.T) {
g := NewWithT(t)
sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil)
result, err := sut.GetPackageControllerConfiguration()
g.Expect(result).To(Equal(""))
g.Expect(err).To(BeNil())
}
func TestGetPackageControllerConfigurationAll(t *testing.T) {
clusterSpec := v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: false,
Controller: &v1alpha1.PackageControllerConfiguration{
Repository: "my-repo",
Digest: "my-digest",
DisableWebhooks: true,
Tag: "my-tag",
Env: []string{"A=B"},
Resources: v1alpha1.PackageControllerResources{
Limits: v1alpha1.ImageResource{
CPU: "my-cpu",
Memory: "my-memory",
},
Requests: v1alpha1.ImageResource{
CPU: "my-requests-cpu",
Memory: "my-requests-memory",
},
},
},
CronJob: &v1alpha1.PackageControllerCronJob{
Repository: "my-cronjob-repo",
Digest: "my-cronjob-digest",
Tag: "my-cronjob-tag",
Disable: true,
},
},
}
cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}}
g := NewWithT(t)
sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster))
result, err := sut.GetPackageControllerConfiguration()
g.Expect(result).To(Equal(expectedAllValues))
g.Expect(err).To(BeNil())
}
func TestGetPackageControllerConfigurationNothing(t *testing.T) {
clusterSpec := v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: true,
},
}
g := NewWithT(t)
cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}}
sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster))
result, err := sut.GetPackageControllerConfiguration()
g.Expect(result).To(Equal(""))
g.Expect(err).To(BeNil())
}
func TestGetCuratedPackagesRegistriesDefaultRegion(t *testing.T) {
clusterSpec := v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: true,
},
}
chart := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
g := NewWithT(t)
cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}}
sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", chart, nil, curatedpackages.WithClusterSpec(&cluster))
_, _, img := sut.GetCuratedPackagesRegistries()
g.Expect(img).To(Equal("783794618700.dkr.ecr.us-west-2.amazonaws.com"))
}
func TestGetCuratedPackagesRegistriesCustomRegion(t *testing.T) {
clusterSpec := v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: true,
},
}
chart := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
g := NewWithT(t)
cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}}
sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", chart, nil, curatedpackages.WithClusterSpec(&cluster), curatedpackages.WithEksaRegion("test"))
_, _, img := sut.GetCuratedPackagesRegistries()
g.Expect(img).To(Equal("783794618700.dkr.ecr.test.amazonaws.com"))
}
func TestGetPackageControllerConfigurationError(t *testing.T) {
clusterSpec := v1alpha1.ClusterSpec{
Packages: &v1alpha1.PackageConfiguration{
Disable: false,
Controller: &v1alpha1.PackageControllerConfiguration{
Env: []string{"AB"},
},
},
}
g := NewWithT(t)
cluster := cluster.Spec{Config: &cluster.Config{Cluster: &v1alpha1.Cluster{Spec: clusterSpec}}}
sut := curatedpackages.NewPackageControllerClient(nil, nil, "billy", "", nil, nil, curatedpackages.WithClusterSpec(&cluster))
_, err := sut.GetPackageControllerConfiguration()
g.Expect(err).NotTo(BeNil())
g.Expect(err.Error()).To(Equal("invalid environment in specification <AB>"))
}
func TestReconcileDeleteGoldenPath(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
log := testr.New(t)
cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}}
kubeconfig := "test.kubeconfig"
nsName := constants.EksaPackagesName + "-" + cluster.Name
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}}
client := fake.NewClientBuilder().WithRuntimeObjects(ns).Build()
ctrl := gomock.NewController(t)
chartManager := mocks.NewMockChartManager(ctrl)
chartManager.EXPECT().Delete(ctx, kubeconfig, "eks-anywhere-packages-"+cluster.Name, constants.EksaPackagesName)
sut := curatedpackages.NewPackageControllerClient(chartManager, nil, "billy", kubeconfig, nil, nil)
err := sut.ReconcileDelete(ctx, log, client, cluster)
g.Expect(err).To(BeNil())
}
func TestReconcileDeleteNamespaceErrorHandling(s *testing.T) {
s.Run("ignores not found errors", func(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
log := testr.New(t)
cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}}
kubeconfig := "test.kubeconfig"
ctrl := gomock.NewController(t)
client := mocks.NewMockKubeDeleter(ctrl)
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "eksa-packages-" + cluster.Name}}
notFoundErr := apierrors.NewNotFound(schema.GroupResource{}, "NOT FOUND: test error")
client.EXPECT().Delete(ctx, ns).Return(notFoundErr)
chartManager := mocks.NewMockChartManager(ctrl)
chartManager.EXPECT().Delete(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
sut := curatedpackages.NewPackageControllerClient(chartManager, nil, "billy", kubeconfig, nil, nil)
err := sut.ReconcileDelete(ctx, log, client, cluster)
g.Expect(err).ShouldNot(HaveOccurred())
})
s.Run("aborts on errors other than not found", func(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
log := testr.New(t)
cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}}
kubeconfig := "test.kubeconfig"
testErr := fmt.Errorf("test error")
ctrl := gomock.NewController(t)
client := mocks.NewMockKubeDeleter(ctrl)
client.EXPECT().Delete(ctx, gomock.Any()).Return(testErr)
chartManager := mocks.NewMockChartManager(ctrl)
sut := curatedpackages.NewPackageControllerClient(chartManager, nil, "billy", kubeconfig, nil, nil)
err := sut.ReconcileDelete(ctx, log, client, cluster)
g.Expect(err).Should(HaveOccurred())
})
}
func TestReconcileDeleteHelmErrorsHandling(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
log := testr.New(t)
cluster := &v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "billy"}}
kubeconfig := "test.kubeconfig"
nsName := constants.EksaPackagesName + "-" + cluster.Name
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsName}}
client := fake.NewClientBuilder().WithRuntimeObjects(ns).Build()
ctrl := gomock.NewController(t)
chartManager := mocks.NewMockChartManager(ctrl)
// TODO this namespace should no longer be empty, following PR 5081
testErr := fmt.Errorf("test error")
chartManager.EXPECT().
Delete(ctx, kubeconfig, "eks-anywhere-packages-"+cluster.Name, constants.EksaPackagesName).
Return(testErr)
sut := curatedpackages.NewPackageControllerClient(chartManager, nil, "billy", kubeconfig, nil, nil)
err := sut.ReconcileDelete(ctx, log, client, cluster)
g.Expect(err).Should(HaveOccurred())
g.Expect(err.Error()).Should(Equal("test error"))
}
func TestEnableFullLifecyclePath(t *testing.T) {
log := testr.New(t)
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
kubeConfig := "kubeconfig.kubeconfig"
chart := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
clusterName := "billy"
writer, _ := filewriter.NewWriter(clusterName)
tt := packageControllerTest{
WithT: NewWithT(t),
ctx: context.Background(),
kubectl: k,
chartManager: cm,
command: curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil),
clusterName: clusterName,
kubeConfig: kubeConfig,
chart: chart,
registryMirror: nil,
writer: writer,
wantValueFile: "testdata/values_empty.yaml",
}
valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName)
ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image()))
// GetCuratedPackagesRegistries can't be used here, as when initialized
// via full cluster lifecycle the package controller client hasn't yet
// determined its chart.
values := []string{
"clusterName=" + clusterName,
"managementClusterName=mgmt",
"workloadPackageOnly=true",
"sourceRegistry=public.ecr.aws/eks-anywhere",
"defaultRegistry=public.ecr.aws/eks-anywhere",
"defaultImageRegistry=783794618700.dkr.ecr.us-west-2.amazonaws.com",
"cronjob.suspend=true",
}
tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name+"-"+clusterName, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, true, gomock.InAnyOrder(values)).Return(nil)
tt.kubectl.EXPECT().
GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(getPBCSuccess(t)).
AnyTimes()
tt.kubectl.EXPECT().
HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }).
AnyTimes()
chartImage := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
err := tt.command.EnableFullLifecycle(tt.ctx, log, clusterName, kubeConfig, chartImage, tt.registryMirror,
curatedpackages.WithEksaRegion("us-west-2"),
curatedpackages.WithManagementClusterName("mgmt"))
if err != nil {
t.Errorf("Install Controller Should succeed when installation passes")
}
}
func TestGetCuratedPackagesRegistries(s *testing.T) {
s.Run("substitutes a region if set", func(t *testing.T) {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
kubeConfig := "kubeconfig.kubeconfig"
chart := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
// eksaRegion := "test-region"
clusterName := "billy"
writer, _ := filewriter.NewWriter(clusterName)
client := curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chart, nil,
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
curatedpackages.WithEksaRegion("testing"),
)
expected := "783794618700.dkr.ecr.testing.amazonaws.com"
_, _, got := client.GetCuratedPackagesRegistries()
if got != expected {
t.Errorf("expected %q, got %q", expected, got)
}
})
s.Run("won't substitute a blank region", func(t *testing.T) {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
kubeConfig := "kubeconfig.kubeconfig"
chart := &artifactsv1.Image{
Name: "test_controller",
URI: "test_registry/eks-anywhere/eks-anywhere-packages:v1",
}
// eksaRegion := "test-region"
clusterName := "billy"
writer, _ := filewriter.NewWriter(clusterName)
client := curatedpackages.NewPackageControllerClient(
cm, k, clusterName, kubeConfig, chart, nil,
curatedpackages.WithManagementClusterName(clusterName),
curatedpackages.WithValuesFileWriter(writer),
)
expected := "783794618700.dkr.ecr.us-west-2.amazonaws.com"
_, _, got := client.GetCuratedPackagesRegistries()
if got != expected {
t.Errorf("expected %q, got %q", expected, got)
}
})
}
func TestReconcile(s *testing.T) {
s.Run("golden path", func(t *testing.T) {
ctx := context.Background()
log := testr.New(t)
cluster := newReconcileTestCluster()
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
bundles := createBundle(cluster)
bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion)
bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name
bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: cluster.Name + "-kubeconfig",
},
}
objs := []runtime.Object{cluster, bundles, secret}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
cm.EXPECT().InstallChart(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil)
err := pcc.Reconcile(ctx, log, fakeClient, cluster)
if err != nil {
t.Errorf("expected nil error, got %s", err)
}
})
s.Run("errors when bundles aren't found", func(t *testing.T) {
ctx := context.Background()
log := testr.New(t)
cluster := newReconcileTestCluster()
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
objs := []runtime.Object{cluster}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil)
err := pcc.Reconcile(ctx, log, fakeClient, cluster)
if err == nil || !apierrors.IsNotFound(err) {
t.Errorf("expected not found err getting cluster resource, got %s", err)
}
})
s.Run("errors when a matching k8s bundle version isn't found", func(t *testing.T) {
ctx := context.Background()
log := testr.New(t)
cluster := newReconcileTestCluster()
cluster.Spec.KubernetesVersion = "non-existent"
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
bundles := createBundle(cluster)
bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name
bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace
objs := []runtime.Object{cluster, bundles}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil)
err := pcc.Reconcile(ctx, log, fakeClient, cluster)
if err == nil || !strings.Contains(err.Error(), "kubernetes version non-existent") {
t.Errorf("expected \"kubernetes version non-existent\" error, got %s", err)
}
})
s.Run("errors when helm fails", func(t *testing.T) {
ctx := context.Background()
log := testr.New(t)
cluster := newReconcileTestCluster()
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
cm := mocks.NewMockChartManager(ctrl)
bundles := createBundle(cluster)
bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion)
bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name
bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: cluster.Name + "-kubeconfig",
},
}
objs := []runtime.Object{cluster, bundles, secret}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
cm.EXPECT().InstallChart(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("test error"))
pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil)
err := pcc.Reconcile(ctx, log, fakeClient, cluster)
if err == nil || !strings.Contains(err.Error(), "packages client error: test error") {
t.Errorf("expected packages client error, got %s", err)
}
})
}
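// newReconcileTestCluster returns a minimal workload cluster with a bundles ref and a management cluster name, used by the Reconcile tests.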
func newReconcileTestCluster() *anywherev1.Cluster {
return &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-workload-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
ManagementCluster: anywherev1.ManagementCluster{
Name: "my-management-cluster",
},
},
}
}
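// createBundle returns a Bundles object with a single versions bundle (KubeVersion 1.20); tests override its name, namespace and KubeVersion as needed.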
func createBundle(cluster *anywherev1.Cluster) *artifactsv1.Bundles {
return &artifactsv1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name,
Namespace: "default",
},
Spec: artifactsv1.BundlesSpec{
VersionsBundles: []artifactsv1.VersionsBundle{
{
KubeVersion: "1.20",
EksD: artifactsv1.EksDRelease{
Name: "test",
EksDReleaseUrl: "testdata/release.yaml",
KubeVersion: "1.20",
},
CertManager: artifactsv1.CertManagerBundle{},
ClusterAPI: artifactsv1.CoreClusterAPI{},
Bootstrap: artifactsv1.KubeadmBootstrapBundle{},
ControlPlane: artifactsv1.KubeadmControlPlaneBundle{},
VSphere: artifactsv1.VSphereBundle{},
Docker: artifactsv1.DockerBundle{},
Eksa: artifactsv1.EksaBundle{},
Cilium: artifactsv1.CiliumBundle{},
Kindnetd: artifactsv1.KindnetdBundle{},
Flux: artifactsv1.FluxBundle{},
BottleRocketHostContainers: artifactsv1.BottlerocketHostContainersBundle{},
ExternalEtcdBootstrap: artifactsv1.EtcdadmBootstrapBundle{},
ExternalEtcdController: artifactsv1.EtcdadmControllerBundle{},
Tinkerbell: artifactsv1.TinkerbellBundle{},
},
},
},
}
}
| 1,384 |
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
)
type PackageController interface {
// Enable curated packages support.
Enable(ctx context.Context) error
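// IsInstalled reports whether the package controller is already installed on the cluster.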
IsInstalled(ctx context.Context) bool
}
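// PackageHandler creates curated packages from a packages file using the provided kubeconfig.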
type PackageHandler interface {
CreatePackages(ctx context.Context, fileName string, kubeConfig string) error
}
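// Installer installs the package controller and any curated packages requested at cluster creation time.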
type Installer struct {
packageController PackageController
spec *cluster.Spec
packageClient PackageHandler
kubectl KubectlRunner
packagesLocation string
mgmtKubeconfig string
}
// IsPackageControllerDisabled detect if the package controller is disabled.
func IsPackageControllerDisabled(cluster *anywherev1.Cluster) bool {
return cluster != nil && cluster.Spec.Packages != nil && cluster.Spec.Packages.Disable
}
// NewInstaller installs packageController and packages during cluster creation.
func NewInstaller(runner KubectlRunner, pc PackageHandler, pcc PackageController, spec *cluster.Spec, packagesLocation, mgmtKubeconfig string) *Installer {
return &Installer{
spec: spec,
packagesLocation: packagesLocation,
packageController: pcc,
packageClient: pc,
kubectl: runner,
mgmtKubeconfig: mgmtKubeconfig,
}
}
// InstallCuratedPackages installs curated packages as part of the cluster creation.
func (pi *Installer) InstallCuratedPackages(ctx context.Context) {
if IsPackageControllerDisabled(pi.spec.Cluster) {
logger.Info(" Package controller disabled")
return
}
PrintLicense()
err := pi.installPackagesController(ctx)
// Per customer request, a failure to install the curated packages
// controller is surfaced as a warning rather than an error.
if err != nil {
logger.MarkWarning(" Failed to install the optional EKS-A Curated Package Controller. Please try installation again through eksctl after the cluster creation succeeds", "warning", err)
return
}
// Likewise, a failure to install the curated packages themselves is
// surfaced as a warning rather than an error.
err = pi.installPackages(ctx)
if err != nil {
logger.MarkWarning(" Failed installing curated packages on the cluster; please install through eksctl anywhere create packages command after the cluster creation succeeds", "error", err)
}
}
func (pi *Installer) installPackagesController(ctx context.Context) error {
logger.Info("Enabling curated packages on the cluster")
err := pi.packageController.Enable(ctx)
if err != nil {
return err
}
return nil
}
func (pi *Installer) installPackages(ctx context.Context) error {
if pi.packagesLocation == "" {
return nil
}
err := pi.packageClient.CreatePackages(ctx, pi.packagesLocation, pi.mgmtKubeconfig)
if err != nil {
return err
}
return nil
}
| 89 |
eks-anywhere | aws | Go | package curatedpackages_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/curatedpackages/mocks"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type packageInstallerTest struct {
*WithT
ctx context.Context
kubectlRunner *mocks.MockKubectlRunner
packageClient *mocks.MockPackageHandler
packageControllerClient *mocks.MockPackageController
spec *cluster.Spec
command *curatedpackages.Installer
packagePath string
kubeConfigPath string
}
func newPackageInstallerTest(t *testing.T) *packageInstallerTest {
ctrl := gomock.NewController(t)
k := mocks.NewMockKubectlRunner(ctrl)
pc := mocks.NewMockPackageHandler(ctrl)
pcc := mocks.NewMockPackageController(ctrl)
packagesPath := "/test/package.yaml"
spec := &cluster.Spec{
Config: &cluster.Config{
Cluster: &anywherev1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "test-cluster",
},
},
},
VersionsBundle: &cluster.VersionsBundle{
VersionsBundle: &v1alpha1.VersionsBundle{
PackageController: v1alpha1.PackageBundle{
HelmChart: v1alpha1.Image{
URI: "test_registry/test/eks-anywhere-packages:v1",
Name: "test_chart",
},
},
},
},
}
kubeConfigPath := kubeconfig.FromClusterName(spec.Cluster.Name)
return &packageInstallerTest{
WithT: NewWithT(t),
ctx: context.Background(),
kubectlRunner: k,
spec: spec,
packagePath: packagesPath,
packageClient: pc,
packageControllerClient: pcc,
kubeConfigPath: kubeConfigPath,
command: curatedpackages.NewInstaller(k, pc, pcc, spec, packagesPath, kubeConfigPath),
}
}
func TestPackageInstallerSuccess(t *testing.T) {
tt := newPackageInstallerTest(t)
tt.packageClient.EXPECT().CreatePackages(tt.ctx, tt.packagePath, tt.kubeConfigPath).Return(nil)
tt.packageControllerClient.EXPECT().Enable(tt.ctx).Return(nil)
tt.command.InstallCuratedPackages(tt.ctx)
}
func TestPackageInstallerFailWhenControllerFails(t *testing.T) {
tt := newPackageInstallerTest(t)
tt.packageControllerClient.EXPECT().Enable(tt.ctx).Return(errors.New("controller installation failed"))
tt.command.InstallCuratedPackages(tt.ctx)
}
func TestPackageInstallerFailWhenPackageFails(t *testing.T) {
tt := newPackageInstallerTest(t)
tt.packageClient.EXPECT().CreatePackages(tt.ctx, tt.packagePath, tt.kubeConfigPath).Return(errors.New("path doesn't exist"))
tt.packageControllerClient.EXPECT().Enable(tt.ctx).Return(nil)
tt.command.InstallCuratedPackages(tt.ctx)
}
func TestPackageInstallerDisabled(t *testing.T) {
tt := newPackageInstallerTest(t)
tt.spec.Cluster.Spec.Packages = &anywherev1.PackageConfiguration{
Disable: true,
}
tt.command.InstallCuratedPackages(tt.ctx)
}
func TestIsPackageControllerDisabled(t *testing.T) {
tt := newPackageInstallerTest(t)
if curatedpackages.IsPackageControllerDisabled(nil) {
t.Errorf("nil cluster should be enabled")
}
if curatedpackages.IsPackageControllerDisabled(tt.spec.Cluster) {
t.Errorf("nil package controller should be enabled")
}
tt.spec.Cluster.Spec.Packages = &anywherev1.PackageConfiguration{
Disable: false,
}
if curatedpackages.IsPackageControllerDisabled(tt.spec.Cluster) {
t.Errorf("package controller should be enabled")
}
tt.spec.Cluster.Spec.Packages = &anywherev1.PackageConfiguration{
Disable: true,
}
if !curatedpackages.IsPackageControllerDisabled(tt.spec.Cluster) {
t.Errorf("package controller should be disabled")
}
}
| 131 |
eks-anywhere | aws | Go | package curatedpackages_test
import (
"context"
_ "embed"
"fmt"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
"oras.land/oras-go/v2/registry/remote"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/registry"
registrymocks "github.com/aws/eks-anywhere/pkg/registry/mocks"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
//go:embed testdata/image-manifest.json
var imageManifest []byte
//go:embed testdata/package-bundle.yaml
var packageBundle []byte
var desc = ocispec.Descriptor{}
type packageReaderTest struct {
*WithT
ctx context.Context
command *curatedpackages.PackageReader
storageClient *registrymocks.MockStorageClient
registryName string
bundles *releasev1.Bundles
}
func newPackageReaderTest(t *testing.T) *packageReaderTest {
ctrl := gomock.NewController(t)
registryName := "public.ecr.aws"
sc := registrymocks.NewMockStorageClient(ctrl)
cache := registry.NewCache()
cache.Set(registryName, sc)
credentialStore := registry.NewCredentialStore()
bundles := releasev1.Bundles{
Spec: releasev1.BundlesSpec{
VersionsBundles: []releasev1.VersionsBundle{
{
KubeVersion: "1.21",
PackageController: releasev1.PackageBundle{
Version: "test-version",
Controller: releasev1.Image{
URI: registryName + "/l0g8r8j6/ctrl:v1",
},
},
},
},
},
}
return &packageReaderTest{
WithT: NewWithT(t),
ctx: context.Background(),
registryName: registryName,
storageClient: sc,
bundles: &bundles,
command: curatedpackages.NewPackageReader(cache, credentialStore, "us-east-1"),
}
}
func TestPackageReader_ReadImagesFromBundles(t *testing.T) {
tt := newPackageReaderTest(t)
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), gomock.Any()).Return(desc, imageManifest, nil)
tt.storageClient.EXPECT().FetchBlob(tt.ctx, gomock.Any(), gomock.Any()).Return(packageBundle, nil)
images, err := tt.command.ReadImagesFromBundles(tt.ctx, tt.bundles)
tt.Expect(err).To(BeNil())
tt.Expect(images).NotTo(BeEmpty())
}
func TestPackageReader_ReadImagesFromBundlesProduction(t *testing.T) {
tt := newPackageReaderTest(t)
artifact := registry.NewArtifactFromURI("public.ecr.aws/eks-anywhere/eks-anywhere-packages-bundles:v1-21-latest")
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), artifact).Return(desc, imageManifest, nil)
tt.storageClient.EXPECT().FetchBlob(tt.ctx, gomock.Any(), gomock.Any()).Return(packageBundle, nil)
tt.bundles.Spec.VersionsBundles[0].PackageController.Controller.URI = tt.registryName + "/eks-anywhere/ctrl:v1"
images, err := tt.command.ReadImagesFromBundles(tt.ctx, tt.bundles)
tt.Expect(err).To(BeNil())
tt.Expect(images).NotTo(BeEmpty())
}
func TestPackageReader_ReadImagesFromBundlesBadKubeVersion(t *testing.T) {
tt := newPackageReaderTest(t)
bundles := tt.bundles.DeepCopy()
bundles.Spec.VersionsBundles[0].KubeVersion = "1"
images, err := tt.command.ReadImagesFromBundles(tt.ctx, bundles)
tt.Expect(err).To(BeNil())
tt.Expect(images).To(BeEmpty())
}
func TestPackageReader_ReadImagesFromBundlesBadRegistry(t *testing.T) {
tt := newPackageReaderTest(t)
tt.bundles.Spec.VersionsBundles[0].PackageController.Controller.URI = "!@#$/eks-anywhere/ctrl:v1"
images, err := tt.command.ReadImagesFromBundles(tt.ctx, tt.bundles)
tt.Expect(err).To(BeNil())
tt.Expect(images).To(BeEmpty())
}
func TestPackageReader_ReadImagesFromBundlesBadData(t *testing.T) {
tt := newPackageReaderTest(t)
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), gomock.Any()).Return(desc, []byte("wot?"), nil)
images, err := tt.command.ReadImagesFromBundles(tt.ctx, tt.bundles)
tt.Expect(err).To(BeNil())
tt.Expect(images).To(BeEmpty())
}
func TestPackageReader_ReadImagesFromBundlesBundlePullError(t *testing.T) {
tt := newPackageReaderTest(t)
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), gomock.Any()).Return(desc, []byte{}, fmt.Errorf("oops"))
images, err := tt.command.ReadImagesFromBundles(tt.ctx, tt.bundles)
tt.Expect(err).To(BeNil())
tt.Expect(images).To(BeEmpty())
}
func TestPackageReader_ReadChartsFromBundles(t *testing.T) {
tt := newPackageReaderTest(t)
artifact := registry.NewArtifactFromURI("public.ecr.aws/l0g8r8j6/eks-anywhere-packages-bundles:v1-21-latest")
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), artifact).Return(desc, imageManifest, nil)
tt.storageClient.EXPECT().FetchBlob(tt.ctx, gomock.Any(), gomock.Any()).Return(packageBundle, nil)
images := tt.command.ReadChartsFromBundles(tt.ctx, tt.bundles)
tt.Expect(images).NotTo(BeEmpty())
}
func TestPackageReader_ReadChartsFromBundlesProduction(t *testing.T) {
tt := newPackageReaderTest(t)
artifact := registry.NewArtifactFromURI("public.ecr.aws/eks-anywhere/eks-anywhere-packages-bundles:v1-21-latest")
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), artifact).Return(desc, imageManifest, nil)
tt.storageClient.EXPECT().FetchBlob(tt.ctx, gomock.Any(), gomock.Any()).Return(packageBundle, nil)
tt.bundles.Spec.VersionsBundles[0].PackageController.Controller.URI = tt.registryName + "/eks-anywhere/ctrl:v1"
images := tt.command.ReadChartsFromBundles(tt.ctx, tt.bundles)
tt.Expect(images).NotTo(BeEmpty())
}
func TestPackageReader_ReadChartsFromBundlesBadKubeVersion(t *testing.T) {
tt := newPackageReaderTest(t)
bundles := tt.bundles.DeepCopy()
bundles.Spec.VersionsBundles[0].KubeVersion = "1"
images := tt.command.ReadChartsFromBundles(tt.ctx, bundles)
tt.Expect(images).To(BeEmpty())
}
func TestPackageReader_ReadChartsFromBundlesBundlePullError(t *testing.T) {
tt := newPackageReaderTest(t)
repo, err := remote.NewRepository("owner/name")
assert.NoError(t, err)
tt.storageClient.EXPECT().GetStorage(tt.ctx, gomock.Any()).Return(repo, nil)
tt.storageClient.EXPECT().FetchBytes(tt.ctx, gomock.Any(), gomock.Any()).Return(desc, []byte{}, fmt.Errorf("oops"))
images := tt.command.ReadChartsFromBundles(tt.ctx, tt.bundles)
tt.Expect(images).To(BeEmpty())
}
| 198 |
eks-anywhere | aws | Go | package curatedpackages
import (
api "github.com/aws/eks-anywhere-packages/api/v1alpha1"
)
// DisplayablePackage wraps Package to omit undesired members (like Status).
//
// This is necessary in part because of https://github.com/golang/go/issues/11939
// but also because we just don't want to generate a Status section when we're
// emitting templates for a user to modify.
type DisplayablePackage struct {
*api.Package
Status *interface{} `json:"status,omitempty"`
}
func NewDisplayablePackage(p *api.Package) *DisplayablePackage {
return &DisplayablePackage{Package: p}
}
| 20 |
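A minimal usage sketch (not part of the repository) of DisplayablePackage: wrapping a Package before marshaling so no status section appears in the emitted template. The package name below is illustrative only.

package main

import (
	"fmt"

	api "github.com/aws/eks-anywhere-packages/api/v1alpha1"
	"sigs.k8s.io/yaml"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
)

func main() {
	p := &api.Package{}
	p.Name = "my-harbor" // hypothetical package name

	// Wrapping shadows the embedded Status field, so the marshaled output
	// contains no status section.
	out, err := yaml.Marshal(curatedpackages.NewDisplayablePackage(p))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}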
eks-anywhere | aws | Go | package curatedpackages
import (
"context"
"fmt"
"sort"
"strings"
"sigs.k8s.io/yaml"
packagesv1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/registry"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
// Temporary: the curated packages dev and prod accounts are hard coded below
// because there is currently no mechanism to look these values up.
const (
prodAccount = "eks-anywhere"
devAccount = "l0g8r8j6"
stagingAccount = "w9m0f3l5"
publicProdECR = "public.ecr.aws/" + prodAccount
publicDevECR = "public.ecr.aws/" + devAccount
stagingDevECR = "public.ecr.aws/" + stagingAccount
packageProdDomain = "783794618700.dkr.ecr.us-west-2.amazonaws.com"
packageDevDomain = "857151390494.dkr.ecr.us-west-2.amazonaws.com"
)
type PackageReader struct {
cache *registry.Cache
credentialStore *registry.CredentialStore
awsRegion string
}
// NewPackageReader creates a new package reader with a storage client.
func NewPackageReader(cache *registry.Cache, credentialStore *registry.CredentialStore, awsRegion string) *PackageReader {
if len(awsRegion) <= 0 {
awsRegion = eksaDefaultRegion
}
return &PackageReader{
cache: cache,
credentialStore: credentialStore,
awsRegion: awsRegion,
}
}
// ReadImagesFromBundles reads the curated packages bundles and returns a list of image artifacts.
func (r *PackageReader) ReadImagesFromBundles(ctx context.Context, b *releasev1.Bundles) ([]registry.Artifact, error) {
var err error
var images []registry.Artifact
for _, vb := range b.Spec.VersionsBundles {
bundleURI, bundle, err := r.getBundle(ctx, vb)
if err != nil {
logger.Info("Warning: Failed getting bundle reference", "error", err)
continue
}
packageImages := r.fetchImagesFromBundle(bundleURI, bundle)
images = append(images, packageImages...)
}
return removeDuplicateImages(images), err
}
// ReadChartsFromBundles reads the curated packages bundles and returns a list of chart artifacts.
func (r *PackageReader) ReadChartsFromBundles(ctx context.Context, b *releasev1.Bundles) []registry.Artifact {
var images []registry.Artifact
for _, vb := range b.Spec.VersionsBundles {
bundleURI, bundle, err := r.getBundle(ctx, vb)
if err != nil {
logger.Info("Warning: Failed getting bundle reference", "error", err)
continue
}
bundleArtifact := registry.NewArtifactFromURI(bundleURI)
images = append(images, bundleArtifact)
packagesHelmChart := r.fetchPackagesHelmChart(bundleURI, bundle)
images = append(images, packagesHelmChart...)
}
return removeDuplicateImages(images)
}
func (r *PackageReader) getBundle(ctx context.Context, vb releasev1.VersionsBundle) (string, *packagesv1.PackageBundle, error) {
bundleURI, err := GetPackageBundleRef(vb)
if err != nil {
return "", nil, err
}
artifact := registry.NewArtifactFromURI(bundleURI)
sc, err := r.cache.Get(registry.NewStorageContext(artifact.Registry, r.credentialStore, nil, false))
if err != nil {
return "", nil, err
}
data, err := registry.PullBytes(ctx, sc, artifact)
if err != nil {
return "", nil, err
}
bundle := packagesv1.PackageBundle{}
err = yaml.Unmarshal(data, &bundle)
if err != nil {
return "", nil, err
}
return artifact.VersionedImage(), &bundle, nil
}
func (r *PackageReader) fetchPackagesHelmChart(bundleURI string, bundle *packagesv1.PackageBundle) []registry.Artifact {
images := make([]registry.Artifact, 0, len(bundle.Spec.Packages))
bundleRegistry := getChartRegistry(bundleURI)
for _, p := range bundle.Spec.Packages {
chartURI := fmt.Sprintf("%s/%s@%s", bundleRegistry, p.Source.Repository, p.Source.Versions[0].Digest)
pHC := registry.NewArtifactFromURI(chartURI)
pHC.Tag = p.Source.Versions[0].Name
images = append(images, pHC)
}
return images
}
func (r *PackageReader) fetchImagesFromBundle(bundleURI string, bundle *packagesv1.PackageBundle) []registry.Artifact {
images := make([]registry.Artifact, 0, len(bundle.Spec.Packages))
bundleRegistry := getImageRegistry(bundleURI, r.awsRegion)
for _, p := range bundle.Spec.Packages {
// each package will have at least one version
for _, version := range p.Source.Versions[0].Images {
imageURI := fmt.Sprintf("%s/%s@%s", bundleRegistry, version.Repository, version.Digest)
image := registry.NewArtifactFromURI(imageURI)
image.Tag = "" // We do not have the tag right now
images = append(images, image)
}
}
return images
}
func removeDuplicateImages(images []registry.Artifact) []registry.Artifact {
uniqueImages := make(map[string]struct{})
var list []registry.Artifact
for _, item := range images {
if _, value := uniqueImages[item.VersionedImage()]; !value {
uniqueImages[item.VersionedImage()] = struct{}{}
list = append(list, item)
}
}
sort.Slice(list, func(i, j int) bool {
return list[i].VersionedImage() < list[j].VersionedImage()
})
return list
}
func getChartRegistry(uri string) string {
if strings.Contains(uri, publicProdECR) {
return publicProdECR
}
return publicDevECR
}
func getImageRegistry(uri, awsRegion string) string {
if strings.Contains(uri, publicProdECR) {
return strings.ReplaceAll(packageProdDomain, eksaDefaultRegion, awsRegion)
}
return strings.ReplaceAll(packageDevDomain, eksaDefaultRegion, awsRegion)
}
| 161 |
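A hedged sketch of how a caller might drive PackageReader; it assumes the registry cache, credential store, and bundles manifest are built elsewhere (as the CLI's dependency factory does) and only shows the call pattern. The AWS region value is a placeholder.

package main

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/curatedpackages"
	"github.com/aws/eks-anywhere/pkg/registry"
	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

// listPackageArtifacts prints every curated-package image and chart referenced
// by the given bundles manifest.
func listPackageArtifacts(ctx context.Context, cache *registry.Cache, creds *registry.CredentialStore, bundles *releasev1.Bundles) error {
	reader := curatedpackages.NewPackageReader(cache, creds, "us-west-2")

	images, err := reader.ReadImagesFromBundles(ctx, bundles)
	if err != nil {
		return err
	}
	for _, image := range images {
		fmt.Println("image:", image.VersionedImage())
	}

	for _, chart := range reader.ReadChartsFromBundles(ctx, bundles) {
		fmt.Println("chart:", chart.VersionedImage())
	}
	return nil
}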
eks-anywhere | aws | Go | package curatedpackages
import (
"fmt"
"strings"
)
func ValidateKubeVersion(kubeVersion string, clusterName string) error {
if len(clusterName) > 0 {
if len(kubeVersion) > 0 {
return fmt.Errorf("please specify either kube-version or cluster name not both")
}
return nil
}
if len(kubeVersion) > 0 {
versionSplit := strings.Split(kubeVersion, ".")
if len(versionSplit) != 2 {
return fmt.Errorf("please specify kube-version as <major>.<minor>")
}
return nil
}
return fmt.Errorf("please specify kube-version or cluster name")
}
| 25 |
eks-anywhere | aws | Go | package curatedpackages_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
)
func TestValidateNoKubeVersionWhenClusterSucceeds(t *testing.T) {
err := curatedpackages.ValidateKubeVersion("", "morby")
if err != nil {
t.Errorf("empty kubeVersion allowed when cluster specified")
}
}
func TestValidateKubeVersionWhenClusterFails(t *testing.T) {
err := curatedpackages.ValidateKubeVersion("1.21", "morby")
if err == nil {
t.Errorf("not both kube-version and cluster")
}
}
func TestValidateKubeVersionWhenNoClusterFails(t *testing.T) {
err := curatedpackages.ValidateKubeVersion("", "")
if err == nil {
t.Errorf("must specify cluster or kubeversion")
}
}
func TestValidateKubeVersionWhenRegistrySucceeds(t *testing.T) {
kubeVersion := "1.21"
err := curatedpackages.ValidateKubeVersion(kubeVersion, "")
if err != nil {
t.Errorf("Registry with %s should succeed", kubeVersion)
}
}
func TestValidateKubeVersionWhenInvalidVersionFails(t *testing.T) {
kubeVersion := "1.2.3"
err := curatedpackages.ValidateKubeVersion(kubeVersion, "")
if err == nil {
t.Errorf("Registry with %s should fail", kubeVersion)
}
}
| 45 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/curatedpackages/bundlemanager.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere-packages/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockManager is a mock of Manager interface.
type MockManager struct {
ctrl *gomock.Controller
recorder *MockManagerMockRecorder
}
// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
mock *MockManager
}
// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
mock := &MockManager{ctrl: ctrl}
mock.recorder = &MockManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
return m.recorder
}
// LatestBundle mocks base method.
func (m *MockManager) LatestBundle(ctx context.Context, baseRef, kubeMajor, kubeMinor, clusterName string) (*v1alpha1.PackageBundle, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LatestBundle", ctx, baseRef, kubeMajor, kubeMinor, clusterName)
ret0, _ := ret[0].(*v1alpha1.PackageBundle)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LatestBundle indicates an expected call of LatestBundle.
func (mr *MockManagerMockRecorder) LatestBundle(ctx, baseRef, kubeMajor, kubeMinor, clusterName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatestBundle", reflect.TypeOf((*MockManager)(nil).LatestBundle), ctx, baseRef, kubeMajor, kubeMinor, clusterName)
}
| 52 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/curatedpackages/packagecontrollerclient.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
types "k8s.io/apimachinery/pkg/types"
client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockClientBuilder is a mock of ClientBuilder interface.
type MockClientBuilder struct {
ctrl *gomock.Controller
recorder *MockClientBuilderMockRecorder
}
// MockClientBuilderMockRecorder is the mock recorder for MockClientBuilder.
type MockClientBuilderMockRecorder struct {
mock *MockClientBuilder
}
// NewMockClientBuilder creates a new mock instance.
func NewMockClientBuilder(ctrl *gomock.Controller) *MockClientBuilder {
mock := &MockClientBuilder{ctrl: ctrl}
mock.recorder = &MockClientBuilderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClientBuilder) EXPECT() *MockClientBuilderMockRecorder {
return m.recorder
}
// GetClient mocks base method.
func (m *MockClientBuilder) GetClient(arg0 context.Context, arg1 types.NamespacedName) (client.Client, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClient", arg0, arg1)
ret0, _ := ret[0].(client.Client)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetClient indicates an expected call of GetClient.
func (mr *MockClientBuilderMockRecorder) GetClient(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockClientBuilder)(nil).GetClient), arg0, arg1)
}
// MockChartInstaller is a mock of ChartInstaller interface.
type MockChartInstaller struct {
ctrl *gomock.Controller
recorder *MockChartInstallerMockRecorder
}
// MockChartInstallerMockRecorder is the mock recorder for MockChartInstaller.
type MockChartInstallerMockRecorder struct {
mock *MockChartInstaller
}
// NewMockChartInstaller creates a new mock instance.
func NewMockChartInstaller(ctrl *gomock.Controller) *MockChartInstaller {
mock := &MockChartInstaller{ctrl: ctrl}
mock.recorder = &MockChartInstallerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockChartInstaller) EXPECT() *MockChartInstallerMockRecorder {
return m.recorder
}
// InstallChart mocks base method.
func (m *MockChartInstaller) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallChart", ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values)
ret0, _ := ret[0].(error)
return ret0
}
// InstallChart indicates an expected call of InstallChart.
func (mr *MockChartInstallerMockRecorder) InstallChart(ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChart", reflect.TypeOf((*MockChartInstaller)(nil).InstallChart), ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values)
}
// MockChartUninstaller is a mock of ChartUninstaller interface.
type MockChartUninstaller struct {
ctrl *gomock.Controller
recorder *MockChartUninstallerMockRecorder
}
// MockChartUninstallerMockRecorder is the mock recorder for MockChartUninstaller.
type MockChartUninstallerMockRecorder struct {
mock *MockChartUninstaller
}
// NewMockChartUninstaller creates a new mock instance.
func NewMockChartUninstaller(ctrl *gomock.Controller) *MockChartUninstaller {
mock := &MockChartUninstaller{ctrl: ctrl}
mock.recorder = &MockChartUninstallerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockChartUninstaller) EXPECT() *MockChartUninstallerMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockChartUninstaller) Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, kubeconfigFilePath, installName, namespace)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockChartUninstallerMockRecorder) Delete(ctx, kubeconfigFilePath, installName, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockChartUninstaller)(nil).Delete), ctx, kubeconfigFilePath, installName, namespace)
}
// MockChartManager is a mock of ChartManager interface.
type MockChartManager struct {
ctrl *gomock.Controller
recorder *MockChartManagerMockRecorder
}
// MockChartManagerMockRecorder is the mock recorder for MockChartManager.
type MockChartManagerMockRecorder struct {
mock *MockChartManager
}
// NewMockChartManager creates a new mock instance.
func NewMockChartManager(ctrl *gomock.Controller) *MockChartManager {
mock := &MockChartManager{ctrl: ctrl}
mock.recorder = &MockChartManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockChartManager) EXPECT() *MockChartManagerMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockChartManager) Delete(ctx context.Context, kubeconfigFilePath, installName, namespace string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", ctx, kubeconfigFilePath, installName, namespace)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockChartManagerMockRecorder) Delete(ctx, kubeconfigFilePath, installName, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockChartManager)(nil).Delete), ctx, kubeconfigFilePath, installName, namespace)
}
// InstallChart mocks base method.
func (m *MockChartManager) InstallChart(ctx context.Context, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath string, skipCRDs bool, values []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallChart", ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values)
ret0, _ := ret[0].(error)
return ret0
}
// InstallChart indicates an expected call of InstallChart.
func (mr *MockChartManagerMockRecorder) InstallChart(ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChart", reflect.TypeOf((*MockChartManager)(nil).InstallChart), ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values)
}
// MockKubeDeleter is a mock of KubeDeleter interface.
type MockKubeDeleter struct {
ctrl *gomock.Controller
recorder *MockKubeDeleterMockRecorder
}
// MockKubeDeleterMockRecorder is the mock recorder for MockKubeDeleter.
type MockKubeDeleterMockRecorder struct {
mock *MockKubeDeleter
}
// NewMockKubeDeleter creates a new mock instance.
func NewMockKubeDeleter(ctrl *gomock.Controller) *MockKubeDeleter {
mock := &MockKubeDeleter{ctrl: ctrl}
mock.recorder = &MockKubeDeleterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubeDeleter) EXPECT() *MockKubeDeleterMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockKubeDeleter) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Delete", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockKubeDeleterMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeDeleter)(nil).Delete), varargs...)
}
| 220 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/curatedpackages/kubectlrunner.go
// Package mocks is a generated GoMock package.
package mocks
import (
bytes "bytes"
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// MockKubectlRunner is a mock of KubectlRunner interface.
type MockKubectlRunner struct {
ctrl *gomock.Controller
recorder *MockKubectlRunnerMockRecorder
}
// MockKubectlRunnerMockRecorder is the mock recorder for MockKubectlRunner.
type MockKubectlRunnerMockRecorder struct {
mock *MockKubectlRunner
}
// NewMockKubectlRunner creates a new mock instance.
func NewMockKubectlRunner(ctrl *gomock.Controller) *MockKubectlRunner {
mock := &MockKubectlRunner{ctrl: ctrl}
mock.recorder = &MockKubectlRunnerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubectlRunner) EXPECT() *MockKubectlRunnerMockRecorder {
return m.recorder
}
// ExecuteCommand mocks base method.
func (m *MockKubectlRunner) ExecuteCommand(ctx context.Context, opts ...string) (bytes.Buffer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ExecuteCommand", varargs...)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExecuteCommand indicates an expected call of ExecuteCommand.
func (mr *MockKubectlRunnerMockRecorder) ExecuteCommand(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteCommand", reflect.TypeOf((*MockKubectlRunner)(nil).ExecuteCommand), varargs...)
}
// ExecuteFromYaml mocks base method.
func (m *MockKubectlRunner) ExecuteFromYaml(ctx context.Context, yaml []byte, opts ...string) (bytes.Buffer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, yaml}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ExecuteFromYaml", varargs...)
ret0, _ := ret[0].(bytes.Buffer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExecuteFromYaml indicates an expected call of ExecuteFromYaml.
func (mr *MockKubectlRunnerMockRecorder) ExecuteFromYaml(ctx, yaml interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, yaml}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteFromYaml", reflect.TypeOf((*MockKubectlRunner)(nil).ExecuteFromYaml), varargs...)
}
// GetObject mocks base method.
func (m *MockKubectlRunner) GetObject(ctx context.Context, resourceType, name, namespece, kubeconfig string, obj runtime.Object) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetObject", ctx, resourceType, name, namespece, kubeconfig, obj)
ret0, _ := ret[0].(error)
return ret0
}
// GetObject indicates an expected call of GetObject.
func (mr *MockKubectlRunnerMockRecorder) GetObject(ctx, resourceType, name, namespece, kubeconfig, obj interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockKubectlRunner)(nil).GetObject), ctx, resourceType, name, namespece, kubeconfig, obj)
}
// HasResource mocks base method.
func (m *MockKubectlRunner) HasResource(ctx context.Context, resourceType, name, kubeconfig, namespace string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasResource", ctx, resourceType, name, kubeconfig, namespace)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HasResource indicates an expected call of HasResource.
func (mr *MockKubectlRunnerMockRecorder) HasResource(ctx, resourceType, name, kubeconfig, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasResource", reflect.TypeOf((*MockKubectlRunner)(nil).HasResource), ctx, resourceType, name, kubeconfig, namespace)
}
| 107 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: sigs.k8s.io/controller-runtime/pkg/client (interfaces: Client)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
meta "k8s.io/apimachinery/pkg/api/meta"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockKubeClient is a mock of Client interface.
type MockKubeClient struct {
ctrl *gomock.Controller
recorder *MockKubeClientMockRecorder
}
// MockKubeClientMockRecorder is the mock recorder for MockKubeClient.
type MockKubeClientMockRecorder struct {
mock *MockKubeClient
}
// NewMockKubeClient creates a new mock instance.
func NewMockKubeClient(ctrl *gomock.Controller) *MockKubeClient {
mock := &MockKubeClient{ctrl: ctrl}
mock.recorder = &MockKubeClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubeClient) EXPECT() *MockKubeClientMockRecorder {
return m.recorder
}
// Create mocks base method.
func (m *MockKubeClient) Create(arg0 context.Context, arg1 client.Object, arg2 ...client.CreateOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Create", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Create indicates an expected call of Create.
func (mr *MockKubeClientMockRecorder) Create(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeClient)(nil).Create), varargs...)
}
// Delete mocks base method.
func (m *MockKubeClient) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Delete", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockKubeClientMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeClient)(nil).Delete), varargs...)
}
// DeleteAllOf mocks base method.
func (m *MockKubeClient) DeleteAllOf(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteAllOfOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteAllOf", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteAllOf indicates an expected call of DeleteAllOf.
func (mr *MockKubeClientMockRecorder) DeleteAllOf(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockKubeClient)(nil).DeleteAllOf), varargs...)
}
// Get mocks base method.
func (m *MockKubeClient) Get(arg0 context.Context, arg1 types.NamespacedName, arg2 client.Object, arg3 ...client.GetOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Get", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Get indicates an expected call of Get.
func (mr *MockKubeClientMockRecorder) Get(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeClient)(nil).Get), varargs...)
}
// List mocks base method.
func (m *MockKubeClient) List(arg0 context.Context, arg1 client.ObjectList, arg2 ...client.ListOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "List", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// List indicates an expected call of List.
func (mr *MockKubeClientMockRecorder) List(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeClient)(nil).List), varargs...)
}
// Patch mocks base method.
func (m *MockKubeClient) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.PatchOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Patch", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Patch indicates an expected call of Patch.
func (mr *MockKubeClientMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockKubeClient)(nil).Patch), varargs...)
}
// RESTMapper mocks base method.
func (m *MockKubeClient) RESTMapper() meta.RESTMapper {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RESTMapper")
ret0, _ := ret[0].(meta.RESTMapper)
return ret0
}
// RESTMapper indicates an expected call of RESTMapper.
func (mr *MockKubeClientMockRecorder) RESTMapper() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RESTMapper", reflect.TypeOf((*MockKubeClient)(nil).RESTMapper))
}
// Scheme mocks base method.
func (m *MockKubeClient) Scheme() *runtime.Scheme {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Scheme")
ret0, _ := ret[0].(*runtime.Scheme)
return ret0
}
// Scheme indicates an expected call of Scheme.
func (mr *MockKubeClientMockRecorder) Scheme() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheme", reflect.TypeOf((*MockKubeClient)(nil).Scheme))
}
// Status mocks base method.
func (m *MockKubeClient) Status() client.SubResourceWriter {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Status")
ret0, _ := ret[0].(client.SubResourceWriter)
return ret0
}
// Status indicates an expected call of Status.
func (mr *MockKubeClientMockRecorder) Status() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockKubeClient)(nil).Status))
}
// SubResource mocks base method.
func (m *MockKubeClient) SubResource(arg0 string) client.SubResourceClient {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SubResource", arg0)
ret0, _ := ret[0].(client.SubResourceClient)
return ret0
}
// SubResource indicates an expected call of SubResource.
func (mr *MockKubeClientMockRecorder) SubResource(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeClient)(nil).SubResource), arg0)
}
// Update mocks base method.
func (m *MockKubeClient) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.UpdateOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Update", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// Update indicates an expected call of Update.
func (mr *MockKubeClientMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockKubeClient)(nil).Update), varargs...)
}
| 229 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/curatedpackages/packageinstaller.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockPackageController is a mock of PackageController interface.
type MockPackageController struct {
ctrl *gomock.Controller
recorder *MockPackageControllerMockRecorder
}
// MockPackageControllerMockRecorder is the mock recorder for MockPackageController.
type MockPackageControllerMockRecorder struct {
mock *MockPackageController
}
// NewMockPackageController creates a new mock instance.
func NewMockPackageController(ctrl *gomock.Controller) *MockPackageController {
mock := &MockPackageController{ctrl: ctrl}
mock.recorder = &MockPackageControllerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPackageController) EXPECT() *MockPackageControllerMockRecorder {
return m.recorder
}
// Enable mocks base method.
func (m *MockPackageController) Enable(ctx context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Enable", ctx)
ret0, _ := ret[0].(error)
return ret0
}
// Enable indicates an expected call of Enable.
func (mr *MockPackageControllerMockRecorder) Enable(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enable", reflect.TypeOf((*MockPackageController)(nil).Enable), ctx)
}
// IsInstalled mocks base method.
func (m *MockPackageController) IsInstalled(ctx context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsInstalled", ctx)
ret0, _ := ret[0].(bool)
return ret0
}
// IsInstalled indicates an expected call of IsInstalled.
func (mr *MockPackageControllerMockRecorder) IsInstalled(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInstalled", reflect.TypeOf((*MockPackageController)(nil).IsInstalled), ctx)
}
// MockPackageHandler is a mock of PackageHandler interface.
type MockPackageHandler struct {
ctrl *gomock.Controller
recorder *MockPackageHandlerMockRecorder
}
// MockPackageHandlerMockRecorder is the mock recorder for MockPackageHandler.
type MockPackageHandlerMockRecorder struct {
mock *MockPackageHandler
}
// NewMockPackageHandler creates a new mock instance.
func NewMockPackageHandler(ctrl *gomock.Controller) *MockPackageHandler {
mock := &MockPackageHandler{ctrl: ctrl}
mock.recorder = &MockPackageHandlerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPackageHandler) EXPECT() *MockPackageHandlerMockRecorder {
return m.recorder
}
// CreatePackages mocks base method.
func (m *MockPackageHandler) CreatePackages(ctx context.Context, fileName, kubeConfig string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreatePackages", ctx, fileName, kubeConfig)
ret0, _ := ret[0].(error)
return ret0
}
// CreatePackages indicates an expected call of CreatePackages.
func (mr *MockPackageHandlerMockRecorder) CreatePackages(ctx, fileName, kubeConfig interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePackages", reflect.TypeOf((*MockPackageHandler)(nil).CreatePackages), ctx, fileName, kubeConfig)
}
| 101 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/curatedpackages/bundle.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
gomock "github.com/golang/mock/gomock"
)
// MockReader is a mock of Reader interface.
type MockReader struct {
ctrl *gomock.Controller
recorder *MockReaderMockRecorder
}
// MockReaderMockRecorder is the mock recorder for MockReader.
type MockReaderMockRecorder struct {
mock *MockReader
}
// NewMockReader creates a new mock instance.
func NewMockReader(ctrl *gomock.Controller) *MockReader {
mock := &MockReader{ctrl: ctrl}
mock.recorder = &MockReaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockReader) EXPECT() *MockReaderMockRecorder {
return m.recorder
}
// ReadBundlesForVersion mocks base method.
func (m *MockReader) ReadBundlesForVersion(eksaVersion string) (*v1alpha1.Bundles, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadBundlesForVersion", eksaVersion)
ret0, _ := ret[0].(*v1alpha1.Bundles)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReadBundlesForVersion indicates an expected call of ReadBundlesForVersion.
func (mr *MockReaderMockRecorder) ReadBundlesForVersion(eksaVersion interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadBundlesForVersion", reflect.TypeOf((*MockReader)(nil).ReadBundlesForVersion), eksaVersion)
}
// MockBundleRegistry is a mock of BundleRegistry interface.
type MockBundleRegistry struct {
ctrl *gomock.Controller
recorder *MockBundleRegistryMockRecorder
}
// MockBundleRegistryMockRecorder is the mock recorder for MockBundleRegistry.
type MockBundleRegistryMockRecorder struct {
mock *MockBundleRegistry
}
// NewMockBundleRegistry creates a new mock instance.
func NewMockBundleRegistry(ctrl *gomock.Controller) *MockBundleRegistry {
mock := &MockBundleRegistry{ctrl: ctrl}
mock.recorder = &MockBundleRegistryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockBundleRegistry) EXPECT() *MockBundleRegistryMockRecorder {
return m.recorder
}
// GetRegistryBaseRef mocks base method.
func (m *MockBundleRegistry) GetRegistryBaseRef(ctx context.Context) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetRegistryBaseRef", ctx)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetRegistryBaseRef indicates an expected call of GetRegistryBaseRef.
func (mr *MockBundleRegistryMockRecorder) GetRegistryBaseRef(ctx interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRegistryBaseRef", reflect.TypeOf((*MockBundleRegistry)(nil).GetRegistryBaseRef), ctx)
}
| 90 |
eks-anywhere | aws | Go | package oras
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/go-logr/logr"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/logger"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type BundleDownloader struct {
dstFolder string
log logr.Logger
}
// NewBundleDownloader returns a new BundleDownloader.
func NewBundleDownloader(log logr.Logger, dstFolder string) *BundleDownloader {
return &BundleDownloader{
log: log,
dstFolder: dstFolder,
}
}
func (bd *BundleDownloader) Download(ctx context.Context, bundles *releasev1.Bundles) {
artifacts := ReadFilesFromBundles(bundles)
for _, a := range UniqueCharts(artifacts) {
data, err := curatedpackages.PullLatestBundle(ctx, bd.log, a)
if err != nil {
fmt.Printf("unable to download bundle %v \n", err)
continue
}
bundleName := strings.Replace(filepath.Base(a), ":", "-", 1)
err = writeToFile(bd.dstFolder, bundleName, data)
if err != nil {
fmt.Printf("unable to write to file %v \n", err)
}
}
}
func UniqueCharts(charts []string) []string {
keys := make(map[string]bool)
var list []string
// Append an entry to the result only the first time it is seen;
// subsequent duplicates are skipped.
for _, entry := range charts {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
func writeToFile(dir string, packageName string, content []byte) error {
file := filepath.Join(dir, packageName) + ".yaml"
if err := os.WriteFile(file, content, 0o640); err != nil {
return fmt.Errorf("unable to write to the file: %s %v", file, err)
}
return nil
}
func ReadFilesFromBundles(bundles *releasev1.Bundles) []string {
var files []string
for _, vb := range bundles.Spec.VersionsBundles {
file, err := curatedpackages.GetPackageBundleRef(vb)
if err != nil {
logger.Info("Warning: Failed parsing package bundle reference", "error", err)
continue
}
files = append(files, file)
}
return files
}
| 82 |
eks-anywhere | aws | Go | package oras
import (
"context"
"os"
"path/filepath"
"strings"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/utils/urls"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type FileRegistryImporter struct {
registry string
username, password string
srcFolder string
}
func NewFileRegistryImporter(registry, username, password, srcFolder string) *FileRegistryImporter {
return &FileRegistryImporter{
registry: registry,
username: username,
password: password,
srcFolder: srcFolder,
}
}
func (fr *FileRegistryImporter) Push(ctx context.Context, bundles *releasev1.Bundles) {
artifacts := ReadFilesFromBundles(bundles)
for _, a := range UniqueCharts(artifacts) {
updatedChartURL := urls.ReplaceHost(a, fr.registry)
fileName := ChartFileName(a)
chartFilepath := filepath.Join(fr.srcFolder, fileName)
data, err := os.ReadFile(chartFilepath)
if err != nil {
logger.Info("Warning: reading file", "error", err)
continue
}
err = curatedpackages.PushBundle(ctx, updatedChartURL, fileName, data)
if err != nil {
logger.Info("Warning: Failed to push to registry", "error", err)
}
}
}
func ChartFileName(chart string) string {
return strings.Replace(filepath.Base(chart), ":", "-", 1) + ".yaml"
}
| 51 |
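An illustrative end-to-end sketch combining the two types in this package: download the curated-package bundles to a scratch folder, then push them to a private registry. The registry address, credentials, and folder are supplied by the caller and are placeholders here.

package oras

import (
	"context"

	"github.com/go-logr/logr"

	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

// mirrorPackageBundles downloads every package bundle referenced by bundles
// into folder and then pushes them to the given registry.
func mirrorPackageBundles(ctx context.Context, log logr.Logger, bundles *releasev1.Bundles, registry, username, password, folder string) {
	NewBundleDownloader(log, folder).Download(ctx, bundles)
	NewFileRegistryImporter(registry, username, password, folder).Push(ctx, bundles)
}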
eks-anywhere | aws | Go | /*
Package defaulting implements tools to perform defaulting in data objects.
These might be used from the CLI and/or the controller.
This package should not, under any circumstance, include specific defaulting logic.
Only the tools to operate that logic should live here.
*/
package defaulting
| 10 |
eks-anywhere | aws | Go | package defaulting_test
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/defaulting"
eksaerrors "github.com/aws/eks-anywhere/pkg/errors"
)
func ExampleRunner_RunAll() {
r := defaulting.NewRunner[cluster.Spec]()
r.Register(
func(ctx context.Context, spec cluster.Spec) (cluster.Spec, error) {
if spec.Cluster.Spec.KubernetesVersion == "" {
spec.Cluster.Spec.KubernetesVersion = anywherev1.Kube124
}
return spec, nil
},
func(ctx context.Context, spec cluster.Spec) (cluster.Spec, error) {
if spec.Cluster.Spec.ControlPlaneConfiguration.Count == 0 {
spec.Cluster.Spec.ControlPlaneConfiguration.Count = 3
}
return spec, nil
},
)
ctx := context.Background()
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ControlPlaneConfiguration.Count = 5
})
updatedSpec, agg := r.RunAll(ctx, *spec)
if agg != nil {
printErrors(agg)
return
}
fmt.Println("Cluster config is valid")
fmt.Printf("Cluster is for kube version: %s\n", updatedSpec.Cluster.Spec.KubernetesVersion)
fmt.Printf("Cluster CP replicas is: %d\n", updatedSpec.Cluster.Spec.ControlPlaneConfiguration.Count)
// Output:
// Cluster config is valid
// Cluster is for kube version: 1.24
// Cluster CP replicas is: 5
}
func printErrors(agg eksaerrors.Aggregate) {
fmt.Println("Failed assigning cluster spec defaults")
for _, err := range agg.Errors() {
msg := "- " + err.Error()
fmt.Println(msg)
}
}
| 59 |
eks-anywhere | aws | Go | package defaulting
import (
"context"
"github.com/aws/eks-anywhere/pkg/errors"
)
// Default is the defaulting logic for a type O. It should return a value of O
// whether it updates it or not. When there is an error, it should return the
// zero value of O and the error.
type Default[O any] func(ctx context.Context, obj O) (O, error)
// Runner allows composing and running a set of defaults.
type Runner[O any] struct {
defaults []Default[O]
}
// NewRunner constructs a new Runner.
func NewRunner[O any]() *Runner[O] {
return &Runner[O]{}
}
// Register adds defaults to the Runner.
func (r *Runner[O]) Register(defaults ...Default[O]) {
r.defaults = append(r.defaults, defaults...)
}
// RunAll runs all defaults sequentially and returns the updated O. When there are errors,
// it returns the zero value of O and the aggregated errors.
func (r *Runner[O]) RunAll(ctx context.Context, obj O) (O, errors.Aggregate) {
var allErr []error
updatedObj := obj
for _, d := range r.defaults {
if newObj, err := d(ctx, updatedObj); err != nil {
allErr = append(allErr, flatten(err)...)
} else {
updatedObj = newObj
}
}
if len(allErr) != 0 {
return *new(O), errors.NewAggregate(allErr)
}
return updatedObj, nil
}
// flatten unfolds and flattens errors inside an errors.Aggregate. If err is not
// an errors.Aggregate, it just returns a slice with one single error.
func flatten(err error) []error {
if agg, ok := err.(errors.Aggregate); ok {
return errors.Flatten(agg).Errors()
}
return []error{err}
}
| 59 |
eks-anywhere | aws | Go | package defaulting_test
import (
"context"
"errors"
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/defaulting"
eksaerrors "github.com/aws/eks-anywhere/pkg/errors"
)
func TestRunnerRunAll(t *testing.T) {
g := NewWithT(t)
r := defaulting.NewRunner[apiCluster]()
r.Register(
func(ctx context.Context, cluster apiCluster) (apiCluster, error) {
if cluster.bundlesName == "" {
cluster.bundlesName = "bundles-1"
}
return cluster, nil
},
func(ctx context.Context, cluster apiCluster) (apiCluster, error) {
if cluster.controlPlaneCount == 0 {
cluster.controlPlaneCount = 3
}
return cluster, nil
},
)
ctx := context.Background()
cluster := apiCluster{}
newCluster, err := r.RunAll(ctx, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(newCluster.bundlesName).To(Equal("bundles-1"))
g.Expect(newCluster.controlPlaneCount).To(Equal(3))
}
func TestRunnerRunAllError(t *testing.T) {
g := NewWithT(t)
e1 := errors.New("first error")
e2 := errors.New("second error")
e3 := errors.New("third error")
r := defaulting.NewRunner[apiCluster]()
r.Register(
func(ctx context.Context, cluster apiCluster) (apiCluster, error) {
return apiCluster{}, eksaerrors.NewAggregate([]error{e1, e2})
},
func(ctx context.Context, cluster apiCluster) (apiCluster, error) {
return cluster, e3
},
)
ctx := context.Background()
cluster := apiCluster{}
g.Expect(r.RunAll(ctx, cluster)).Error().To(And(
MatchError(ContainSubstring("first error")),
MatchError(ContainSubstring("second error")),
MatchError(ContainSubstring("third error")),
))
}
type apiCluster struct {
controlPlaneCount int
bundlesName string
}
| 70 |
eks-anywhere | aws | Go | package dependencies
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"os"
"time"
"github.com/go-logr/logr"
"github.com/google/uuid"
"golang.org/x/exp/maps"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/aws"
"github.com/aws/eks-anywhere/pkg/awsiamauth"
"github.com/aws/eks-anywhere/pkg/bootstrapper"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clustermanager"
cliconfig "github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/diagnostics"
"github.com/aws/eks-anywhere/pkg/eksd"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/executables/cmk"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/filewriter"
gitfactory "github.com/aws/eks-anywhere/pkg/git/factory"
"github.com/aws/eks-anywhere/pkg/gitops/flux"
"github.com/aws/eks-anywhere/pkg/govmomi"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/networking/cilium"
"github.com/aws/eks-anywhere/pkg/networking/kindnetd"
"github.com/aws/eks-anywhere/pkg/networkutils"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/providers/docker"
"github.com/aws/eks-anywhere/pkg/providers/nutanix"
"github.com/aws/eks-anywhere/pkg/providers/snow"
"github.com/aws/eks-anywhere/pkg/providers/tinkerbell"
"github.com/aws/eks-anywhere/pkg/providers/validator"
"github.com/aws/eks-anywhere/pkg/providers/vsphere"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/version"
"github.com/aws/eks-anywhere/pkg/workflow/task/workload"
"github.com/aws/eks-anywhere/pkg/workflows/interfaces"
)
type Dependencies struct {
Logger logr.Logger
Provider providers.Provider
ClusterAwsCli *executables.Clusterawsadm
DockerClient *executables.Docker
Kubectl *executables.Kubectl
Govc *executables.Govc
CloudStackValidatorRegistry cloudstack.ValidatorRegistry
SnowAwsClientRegistry *snow.AwsClientRegistry
SnowConfigManager *snow.ConfigManager
Writer filewriter.FileWriter
Kind *executables.Kind
Clusterctl *executables.Clusterctl
Flux *executables.Flux
Troubleshoot *executables.Troubleshoot
Helm *executables.Helm
UnAuthKubeClient *kubernetes.UnAuthClient
Networking clustermanager.Networking
CNIInstaller workload.CNIInstaller
CiliumTemplater *cilium.Templater
AwsIamAuth *awsiamauth.Installer
ClusterManager *clustermanager.ClusterManager
Bootstrapper *bootstrapper.Bootstrapper
GitOpsFlux *flux.Flux
Git *gitfactory.GitTools
EksdInstaller *eksd.Installer
EksdUpgrader *eksd.Upgrader
KubeProxyCLIUpgrader clustermanager.KubeProxyCLIUpgrader
AnalyzerFactory diagnostics.AnalyzerFactory
CollectorFactory diagnostics.CollectorFactory
DignosticCollectorFactory diagnostics.DiagnosticBundleFactory
CAPIManager *clusterapi.Manager
FileReader *files.Reader
ManifestReader *manifests.Reader
closers []types.Closer
CliConfig *cliconfig.CliConfig
PackageInstaller interfaces.PackageInstaller
BundleRegistry curatedpackages.BundleRegistry
PackageControllerClient *curatedpackages.PackageControllerClient
PackageClient curatedpackages.PackageHandler
VSphereValidator *vsphere.Validator
VSphereDefaulter *vsphere.Defaulter
NutanixClientCache *nutanix.ClientCache
NutanixDefaulter *nutanix.Defaulter
NutanixValidator *nutanix.Validator
SnowValidator *snow.Validator
IPValidator *validator.IPValidator
UnAuthKubectlClient KubeClients
}
// KubeClients defines a super struct that exposes all behavior.
type KubeClients struct {
*executables.Kubectl
*kubernetes.UnAuthClient
}
func (d *Dependencies) Close(ctx context.Context) error {
// Iterate in reverse so closers are closed in LIFO order.
for i := len(d.closers) - 1; i >= 0; i-- {
if err := d.closers[i].Close(ctx); err != nil {
return err
}
}
return nil
}
func ForSpec(ctx context.Context, clusterSpec *cluster.Spec) *Factory {
eksaToolsImage := clusterSpec.VersionsBundle.Eksa.CliTools
return NewFactory().
UseExecutableImage(eksaToolsImage.VersionedImage()).
WithRegistryMirror(registrymirror.FromCluster(clusterSpec.Cluster)).
UseProxyConfiguration(clusterSpec.Cluster.ProxyConfiguration()).
WithWriterFolder(clusterSpec.Cluster.Name).
WithDiagnosticCollectorImage(clusterSpec.VersionsBundle.Eksa.DiagnosticCollector.VersionedImage())
}
// Factory helps initialize the CLI dependencies, deferring construction to registered build steps.
type Factory struct {
executablesConfig *executablesConfig
config config
registryMirror *registrymirror.RegistryMirror
proxyConfiguration map[string]string
writerFolder string
diagnosticCollectorImage string
buildSteps []buildStep
dependencies Dependencies
}
type executablesConfig struct {
builder *executables.ExecutablesBuilder
image string
useDockerContainer bool
dockerClient executables.DockerClient
mountDirs []string
}
type config struct {
bundlesOverride string
noTimeouts bool
}
type buildStep func(ctx context.Context) error
func NewFactory() *Factory {
return &Factory{
writerFolder: "./",
executablesConfig: &executablesConfig{
useDockerContainer: executables.ExecutablesInDocker(),
},
buildSteps: make([]buildStep, 0),
}
}
func (f *Factory) Build(ctx context.Context) (*Dependencies, error) {
for _, step := range f.buildSteps {
if err := step(ctx); err != nil {
return nil, err
}
}
// clean up stack
f.buildSteps = make([]buildStep, 0)
// Make a copy of dependencies since its attributes are public
d := f.dependencies
return &d, nil
}
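// Illustrative sketch, not part of the original file: the Factory is driven by
// chaining the WithX builders, which register lazy build steps, and then
// calling Build once. The builder selection below is arbitrary; callers should
// defer Close on the returned Dependencies.
func buildMinimalDependencies(ctx context.Context) (*Dependencies, error) {
	return NewFactory().
		WithLocalExecutables().
		WithWriter().
		WithKubectl().
		Build(ctx)
}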
func (f *Factory) WithWriterFolder(folder string) *Factory {
f.writerFolder = folder
return f
}
// WithRegistryMirror configures the factory to use a registry mirror wherever applicable.
func (f *Factory) WithRegistryMirror(registryMirror *registrymirror.RegistryMirror) *Factory {
f.registryMirror = registryMirror
return f
}
func (f *Factory) UseProxyConfiguration(proxyConfig map[string]string) *Factory {
f.proxyConfiguration = proxyConfig
return f
}
func (f *Factory) GetProxyConfiguration() map[string]string {
return f.proxyConfiguration
}
func (f *Factory) WithProxyConfiguration() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.proxyConfiguration == nil {
proxyConfig := cliconfig.GetProxyConfigFromEnv()
f.UseProxyConfiguration(proxyConfig)
}
return nil
},
)
return f
}
func (f *Factory) UseExecutableImage(image string) *Factory {
f.executablesConfig.image = image
return f
}
// WithExecutableImage sets the right CLI tools image for the executable builder, reading
// from the Bundles manifest and using the first VersionsBundle.
// This is just the default for when there is no specific Kubernetes version available.
// For commands that receive a cluster config file or a Kubernetes version directly as input,
// use UseExecutableImage to specify the image directly.
func (f *Factory) WithExecutableImage() *Factory {
f.WithManifestReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.executablesConfig.image != "" {
return nil
}
if f.config.bundlesOverride != "" {
image, err := f.selectImageFromBundleOverride(f.config.bundlesOverride)
if err != nil {
return err
}
f.executablesConfig.image = image
return nil
}
bundles, err := f.dependencies.ManifestReader.ReadBundlesForVersion(version.Get().GitVersion)
if err != nil {
return fmt.Errorf("retrieving executable tools image from bundle in dependency factory: %v", err)
}
f.executablesConfig.image = bundles.DefaultEksAToolsImage().VersionedImage()
return nil
})
return f
}
// selectImageFromBundleOverride retrieves an image from a bundles override.
//
// Handles cases where the bundle is configured with an override.
func (f *Factory) selectImageFromBundleOverride(bundlesOverride string) (string, error) {
releaseBundles, err := bundles.Read(f.dependencies.ManifestReader, bundlesOverride)
if err != nil {
return "", fmt.Errorf("retrieving executable tools image from overridden bundle in dependency factory %v", err)
}
// Note: currently the first available version of the CLI tools is used because
// the bundled binaries are all the same version, so there are no compatibility concerns.
// If that behavior ever changes, this choice will need to be reassessed.
return releaseBundles.DefaultEksAToolsImage().VersionedImage(), nil
}
// WithCustomBundles allows configuring a bundle override.
func (f *Factory) WithCustomBundles(bundlesOverride string) *Factory {
if bundlesOverride == "" {
return f
}
f.config.bundlesOverride = bundlesOverride
f.WithExecutableImage()
return f
}
func (f *Factory) WithExecutableMountDirs(mountDirs ...string) *Factory {
f.executablesConfig.mountDirs = mountDirs
return f
}
func (f *Factory) WithLocalExecutables() *Factory {
f.executablesConfig.useDockerContainer = false
return f
}
// UseExecutablesDockerClient forces a specific DockerClient to build
// Executables, as opposed to following the normal building flow.
// This is only for testing.
func (f *Factory) UseExecutablesDockerClient(client executables.DockerClient) *Factory {
f.executablesConfig.dockerClient = client
return f
}
// dockerLogin performs a docker login using credentials read from environment variables.
func dockerLogin(ctx context.Context, registry string, docker executables.DockerClient) error {
username, password, _ := cliconfig.ReadCredentials()
err := docker.Login(ctx, registry, username, password)
if err != nil {
return err
}
return nil
}
// WithDockerLogin adds a docker login to the build steps.
func (f *Factory) WithDockerLogin() *Factory {
f.WithDocker()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.registryMirror != nil {
err := dockerLogin(ctx, f.registryMirror.BaseRegistry, f.executablesConfig.dockerClient)
if err != nil {
return err
}
}
return nil
})
return f
}
func (f *Factory) WithExecutableBuilder() *Factory {
if f.executablesConfig.useDockerContainer {
f.WithExecutableImage().WithDocker()
if f.registryMirror != nil && f.registryMirror.Auth {
f.WithDockerLogin()
}
}
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.executablesConfig.builder != nil {
return nil
}
if f.executablesConfig.useDockerContainer {
image := f.executablesConfig.image
if f.registryMirror != nil {
image = f.registryMirror.ReplaceRegistry(image)
}
b, err := executables.NewInDockerExecutablesBuilder(
f.executablesConfig.dockerClient,
image,
f.executablesConfig.mountDirs...,
)
if err != nil {
return err
}
f.executablesConfig.builder = b
} else {
f.executablesConfig.builder = executables.NewLocalExecutablesBuilder()
}
closer, err := f.executablesConfig.builder.Init(ctx)
if err != nil {
return err
}
if f.registryMirror != nil && f.registryMirror.Auth {
docker := f.executablesConfig.builder.BuildDockerExecutable()
err := dockerLogin(ctx, f.registryMirror.BaseRegistry, docker)
if err != nil {
return err
}
}
f.dependencies.closers = append(f.dependencies.closers, closer)
return nil
})
return f
}
func (f *Factory) WithProvider(clusterConfigFile string, clusterConfig *v1alpha1.Cluster, skipIpCheck bool, hardwareCSVPath string, force bool, tinkerbellBootstrapIp string) *Factory {
switch clusterConfig.Spec.DatacenterRef.Kind {
case v1alpha1.VSphereDatacenterKind:
f.WithKubectl().WithGovc().WithWriter().WithIPValidator()
case v1alpha1.CloudStackDatacenterKind:
f.WithKubectl().WithCloudStackValidatorRegistry(skipIpCheck).WithWriter()
case v1alpha1.DockerDatacenterKind:
f.WithDocker().WithKubectl()
case v1alpha1.TinkerbellDatacenterKind:
if clusterConfig.Spec.RegistryMirrorConfiguration != nil {
f.WithDocker().WithKubectl().WithWriter().WithHelm(executables.WithInsecure())
} else {
f.WithDocker().WithKubectl().WithWriter().WithHelm()
}
case v1alpha1.SnowDatacenterKind:
f.WithUnAuthKubeClient().WithSnowConfigManager()
case v1alpha1.NutanixDatacenterKind:
f.WithKubectl().WithNutanixClientCache().WithNutanixDefaulter().WithNutanixValidator().WithIPValidator()
}
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Provider != nil {
return nil
}
switch clusterConfig.Spec.DatacenterRef.Kind {
case v1alpha1.VSphereDatacenterKind:
datacenterConfig, err := v1alpha1.GetVSphereDatacenterConfig(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get datacenter config from file %s: %v", clusterConfigFile, err)
}
f.dependencies.Provider = vsphere.NewProvider(
datacenterConfig,
clusterConfig,
f.dependencies.Govc,
f.dependencies.Kubectl,
f.dependencies.Writer,
f.dependencies.IPValidator,
time.Now,
skipIpCheck,
)
case v1alpha1.CloudStackDatacenterKind:
datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get datacenter config from file %s: %v", clusterConfigFile, err)
}
execConfig, err := decoder.ParseCloudStackCredsFromEnv()
if err != nil {
return fmt.Errorf("parsing CloudStack credentials: %v", err)
}
validator, err := f.dependencies.CloudStackValidatorRegistry.Get(execConfig)
if err != nil {
return fmt.Errorf("building validator from exec config: %v", err)
}
f.dependencies.Provider = cloudstack.NewProvider(datacenterConfig, clusterConfig, f.dependencies.Kubectl, validator, f.dependencies.Writer, time.Now, logger.Get())
case v1alpha1.SnowDatacenterKind:
f.dependencies.Provider = snow.NewProvider(
f.dependencies.UnAuthKubeClient,
f.dependencies.SnowConfigManager,
skipIpCheck,
)
case v1alpha1.TinkerbellDatacenterKind:
datacenterConfig, err := v1alpha1.GetTinkerbellDatacenterConfig(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get datacenter config from file %s: %v", clusterConfigFile, err)
}
machineConfigs, err := v1alpha1.GetTinkerbellMachineConfigs(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get machine config from file %s: %v", clusterConfigFile, err)
}
tinkerbellIp := tinkerbellBootstrapIp
if tinkerbellIp == "" {
logger.V(4).Info("Inferring local Tinkerbell Bootstrap IP from environment")
localIp, err := networkutils.GetLocalIP()
if err != nil {
return err
}
tinkerbellIp = localIp.String()
}
logger.V(4).Info("Tinkerbell IP", "tinkerbell-ip", tinkerbellIp)
provider, err := tinkerbell.NewProvider(
datacenterConfig,
machineConfigs,
clusterConfig,
hardwareCSVPath,
f.dependencies.Writer,
f.dependencies.DockerClient,
f.dependencies.Helm,
f.dependencies.Kubectl,
tinkerbellIp,
time.Now,
force,
skipIpCheck,
)
if err != nil {
return err
}
f.dependencies.Provider = provider
case v1alpha1.DockerDatacenterKind:
datacenterConfig, err := v1alpha1.GetDockerDatacenterConfig(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get datacenter config from file %s: %v", clusterConfigFile, err)
}
f.dependencies.Provider = docker.NewProvider(
datacenterConfig,
f.dependencies.DockerClient,
f.dependencies.Kubectl,
time.Now,
)
case v1alpha1.NutanixDatacenterKind:
datacenterConfig, err := v1alpha1.GetNutanixDatacenterConfig(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get datacenter config from file %s: %v", clusterConfigFile, err)
}
machineConfigs, err := v1alpha1.GetNutanixMachineConfigs(clusterConfigFile)
if err != nil {
return fmt.Errorf("unable to get machine config from file %s: %v", clusterConfigFile, err)
}
skipVerifyTransport := http.DefaultTransport.(*http.Transport).Clone()
skipVerifyTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
httpClient := &http.Client{Transport: skipVerifyTransport}
provider := nutanix.NewProvider(
datacenterConfig,
machineConfigs,
clusterConfig,
f.dependencies.Kubectl,
f.dependencies.Writer,
f.dependencies.NutanixClientCache,
f.dependencies.IPValidator,
crypto.NewTlsValidator(),
httpClient,
time.Now,
skipIpCheck,
)
f.dependencies.Provider = provider
default:
return fmt.Errorf("no provider support for datacenter kind: %s", clusterConfig.Spec.DatacenterRef.Kind)
}
return nil
})
return f
}
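// WithDocker adds a build step that builds the Docker client executable and, if no
// executables Docker client has been set, reuses it for the executables config.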
func (f *Factory) WithDocker() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.DockerClient != nil {
return nil
}
f.dependencies.DockerClient = executables.BuildDockerExecutable()
if f.executablesConfig.dockerClient == nil {
f.executablesConfig.dockerClient = f.dependencies.DockerClient
}
return nil
})
return f
}
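// WithKubectl adds a build step that builds the Kubectl executable.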
func (f *Factory) WithKubectl() *Factory {
f.WithExecutableBuilder()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Kubectl != nil {
return nil
}
f.dependencies.Kubectl = f.executablesConfig.builder.BuildKubectlExecutable()
return nil
})
return f
}
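// WithGovc adds a build step that builds the Govc executable and registers it to be closed with the factory.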
func (f *Factory) WithGovc() *Factory {
f.WithExecutableBuilder().WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Govc != nil {
return nil
}
f.dependencies.Govc = f.executablesConfig.builder.BuildGovcExecutable(f.dependencies.Writer)
f.dependencies.closers = append(f.dependencies.closers, f.dependencies.Govc)
return nil
})
return f
}
// WithCloudStackValidatorRegistry initializes the CloudStack validator registry for the object being constructed, making it available when the CloudStack provider is built.
func (f *Factory) WithCloudStackValidatorRegistry(skipIPCheck bool) *Factory {
f.WithExecutableBuilder().WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.CloudStackValidatorRegistry != nil {
return nil
}
cmkBuilder := cmk.NewCmkBuilder(f.executablesConfig.builder)
f.dependencies.CloudStackValidatorRegistry = cloudstack.NewValidatorFactory(cmkBuilder, f.dependencies.Writer, skipIPCheck)
return nil
})
return f
}
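// WithSnowConfigManager adds a build step that constructs the Snow config manager from
// an IMDS-aware validator and the Snow defaulters.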
func (f *Factory) WithSnowConfigManager() *Factory {
f.WithAwsSnow().WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.SnowConfigManager != nil {
return nil
}
client := aws.NewClient()
if err := client.BuildIMDS(ctx); err != nil {
return err
}
validator := snow.NewValidator(f.dependencies.SnowAwsClientRegistry, snow.WithIMDS(client))
defaulters := snow.NewDefaulters(f.dependencies.SnowAwsClientRegistry, f.dependencies.Writer)
f.dependencies.SnowConfigManager = snow.NewConfigManager(defaulters, validator)
return nil
})
return f
}
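// WithAwsSnow adds a build step that builds the Snow AWS client registry.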
func (f *Factory) WithAwsSnow() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.SnowAwsClientRegistry != nil {
return nil
}
clientRegistry := snow.NewAwsClientRegistry()
err := clientRegistry.Build(ctx)
if err != nil {
return err
}
f.dependencies.SnowAwsClientRegistry = clientRegistry
return nil
})
return f
}
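// WithWriter adds a build step that creates a file writer rooted at the factory's writer folder.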
func (f *Factory) WithWriter() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Writer != nil {
return nil
}
var err error
f.dependencies.Writer, err = filewriter.NewWriter(f.writerFolder)
if err != nil {
return err
}
return nil
})
return f
}
func (f *Factory) WithKind() *Factory {
f.WithExecutableBuilder().WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Kind != nil {
return nil
}
f.dependencies.Kind = f.executablesConfig.builder.BuildKindExecutable(f.dependencies.Writer)
return nil
})
return f
}
func (f *Factory) WithClusterctl() *Factory {
f.WithExecutableBuilder().WithWriter().WithFileReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Clusterctl != nil {
return nil
}
f.dependencies.Clusterctl = f.executablesConfig.builder.BuildClusterCtlExecutable(
f.dependencies.Writer,
f.dependencies.FileReader,
)
return nil
})
return f
}
func (f *Factory) WithFlux() *Factory {
f.WithExecutableBuilder()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Flux != nil {
return nil
}
f.dependencies.Flux = f.executablesConfig.builder.BuildFluxExecutable()
return nil
})
return f
}
func (f *Factory) WithTroubleshoot() *Factory {
f.WithExecutableBuilder()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Troubleshoot != nil {
return nil
}
f.dependencies.Troubleshoot = f.executablesConfig.builder.BuildTroubleshootExecutable()
return nil
})
return f
}
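// WithHelm adds a build step that builds the Helm executable, appending registry mirror
// and proxy configuration options when they are configured on the factory.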
func (f *Factory) WithHelm(opts ...executables.HelmOpt) *Factory {
f.WithExecutableBuilder().WithProxyConfiguration()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.registryMirror != nil {
opts = append(opts, executables.WithRegistryMirror(f.registryMirror))
}
if f.proxyConfiguration != nil {
opts = append(opts, executables.WithEnv(f.proxyConfiguration))
}
f.dependencies.Helm = f.executablesConfig.builder.BuildHelmExecutable(opts...)
return nil
})
return f
}
// WithNetworking builds a Networking.
func (f *Factory) WithNetworking(clusterConfig *v1alpha1.Cluster) *Factory {
var networkingBuilder func() clustermanager.Networking
if clusterConfig.Spec.ClusterNetwork.CNIConfig.Kindnetd != nil {
f.WithKubectl().WithFileReader()
networkingBuilder = func() clustermanager.Networking {
return kindnetd.NewKindnetd(f.dependencies.Kubectl, f.dependencies.FileReader)
}
} else {
f.WithKubectl().WithCiliumTemplater()
networkingBuilder = func() clustermanager.Networking {
var opts []cilium.RetrierClientOpt
if f.config.noTimeouts {
opts = append(opts, cilium.RetrierClientRetrier(retrier.NewWithNoTimeout()))
}
c := cilium.NewCilium(
cilium.NewRetrier(f.dependencies.Kubectl, opts...),
f.dependencies.CiliumTemplater,
)
c.SetSkipUpgrade(!clusterConfig.Spec.ClusterNetwork.CNIConfig.Cilium.IsManaged())
return c
}
}
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Networking != nil {
return nil
}
f.dependencies.Networking = networkingBuilder()
return nil
})
return f
}
// WithCNIInstaller builds a CNI installer for the given cluster.
func (f *Factory) WithCNIInstaller(spec *cluster.Spec, provider providers.Provider) *Factory {
if spec.Cluster.Spec.ClusterNetwork.CNIConfig.Kindnetd != nil {
f.WithKubectl().WithFileReader()
} else {
f.WithKubectl().WithCiliumTemplater()
}
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.CNIInstaller != nil {
return nil
}
if spec.Cluster.Spec.ClusterNetwork.CNIConfig.Kindnetd != nil {
f.dependencies.CNIInstaller = kindnetd.NewInstallerForSpec(
f.dependencies.Kubectl,
f.dependencies.FileReader,
spec,
)
} else {
f.dependencies.CNIInstaller = cilium.NewInstallerForSpec(
cilium.NewRetrier(f.dependencies.Kubectl),
f.dependencies.CiliumTemplater,
cilium.Config{
Spec: spec,
AllowedNamespaces: maps.Keys(provider.GetDeployments()),
},
)
}
return nil
})
return f
}
func (f *Factory) WithCiliumTemplater() *Factory {
f.WithHelm(executables.WithInsecure())
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.CiliumTemplater != nil {
return nil
}
f.dependencies.CiliumTemplater = cilium.NewTemplater(f.dependencies.Helm)
return nil
})
return f
}
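// WithAwsIamAuth adds a build step that constructs the AWS IAM Authenticator installer
// with a certificate generator, a new cluster ID and a retrier-backed kubectl client.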
func (f *Factory) WithAwsIamAuth() *Factory {
f.WithKubectl().WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.AwsIamAuth != nil {
return nil
}
certgen := crypto.NewCertificateGenerator()
clusterId := uuid.New()
var opts []awsiamauth.RetrierClientOpt
if f.config.noTimeouts {
opts = append(opts, awsiamauth.RetrierClientRetrier(*retrier.NewWithNoTimeout()))
}
f.dependencies.AwsIamAuth = awsiamauth.NewInstaller(
certgen,
clusterId,
awsiamauth.NewRetrierClient(f.dependencies.Kubectl, opts...),
f.dependencies.Writer,
)
return nil
})
return f
}
// WithIPValidator builds the IPValidator.
func (f *Factory) WithIPValidator() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.IPValidator != nil {
return nil
}
f.dependencies.IPValidator = validator.NewIPValidator()
return nil
})
return f
}
type bootstrapperClient struct {
*executables.Kind
*executables.Kubectl
}
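// WithBootstrapper adds a build step that constructs the bootstrap cluster bootstrapper
// on top of the Kind and Kubectl executables.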
func (f *Factory) WithBootstrapper() *Factory {
f.WithKind().WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Bootstrapper != nil {
return nil
}
var opts []bootstrapper.RetrierClientOpt
if f.config.noTimeouts {
opts = append(opts,
bootstrapper.WithRetrierClientRetrier(
*retrier.NewWithNoTimeout(),
),
)
}
f.dependencies.Bootstrapper = bootstrapper.New(
bootstrapper.NewRetrierClient(
f.dependencies.Kind,
f.dependencies.Kubectl,
opts...,
),
)
return nil
})
return f
}
type clusterManagerClient struct {
*executables.Clusterctl
*executables.Kubectl
}
// ClusterManagerTimeoutOptions maintains the timeout options for cluster manager.
type ClusterManagerTimeoutOptions struct {
NoTimeouts bool
ControlPlaneWait, ExternalEtcdWait, MachineWait, UnhealthyMachineWait, NodeStartupWait time.Duration
}
func (f *Factory) eksaInstallerOpts() []clustermanager.EKSAInstallerOpt {
var opts []clustermanager.EKSAInstallerOpt
if f.config.noTimeouts {
opts = append(opts, clustermanager.WithEKSAInstallerNoTimeouts())
}
return opts
}
func (f *Factory) clusterManagerOpts(timeoutOpts *ClusterManagerTimeoutOptions) []clustermanager.ClusterManagerOpt {
if timeoutOpts == nil {
return nil
}
o := []clustermanager.ClusterManagerOpt{
clustermanager.WithControlPlaneWaitTimeout(timeoutOpts.ControlPlaneWait),
clustermanager.WithExternalEtcdWaitTimeout(timeoutOpts.ExternalEtcdWait),
clustermanager.WithMachineMaxWait(timeoutOpts.MachineWait),
clustermanager.WithUnhealthyMachineTimeout(timeoutOpts.UnhealthyMachineWait),
clustermanager.WithNodeStartupTimeout(timeoutOpts.NodeStartupWait),
}
if f.config.noTimeouts {
o = append(o, clustermanager.WithNoTimeouts())
}
return o
}
// WithClusterManager builds a cluster manager based on the cluster config and timeout options.
func (f *Factory) WithClusterManager(clusterConfig *v1alpha1.Cluster, timeoutOpts *ClusterManagerTimeoutOptions) *Factory {
f.WithClusterctl().WithKubectl().WithNetworking(clusterConfig).WithWriter().WithDiagnosticBundleFactory().WithAwsIamAuth().WithFileReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.ClusterManager != nil {
return nil
}
var r *retrier.Retrier
if f.config.noTimeouts {
r = retrier.NewWithNoTimeout()
} else {
r = clustermanager.DefaultRetrier()
}
client := clustermanager.NewRetrierClient(
&clusterManagerClient{
f.dependencies.Clusterctl,
f.dependencies.Kubectl,
},
r,
)
installer := clustermanager.NewEKSAInstaller(client, f.dependencies.FileReader, f.eksaInstallerOpts()...)
f.dependencies.ClusterManager = clustermanager.New(
client,
f.dependencies.Networking,
f.dependencies.Writer,
f.dependencies.DignosticCollectorFactory,
f.dependencies.AwsIamAuth,
installer,
f.clusterManagerOpts(timeoutOpts)...,
)
return nil
})
return f
}
// WithNoTimeouts injects the no-timeout behavior into all the dependencies with a configurable timeout.
// Calling this method removes the timeout for the waits and retries in all the
// cluster operations, i.e. cluster manager, eksa installer, networking installer.
// Instead of passing the option to each dependency's constructor, use this
// method so that newly added dependencies also pick up the no-timeout configuration.
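//
// A minimal usage sketch (the dependencies chained here are purely illustrative):
//
//	deps, err := dependencies.NewFactory().
//		WithNoTimeouts().
//		WithClusterManager(clusterConfig, nil).
//		Build(ctx)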
func (f *Factory) WithNoTimeouts() *Factory {
f.config.noTimeouts = true
return f
}
// WithCliConfig builds a cli config.
func (f *Factory) WithCliConfig(cliConfig *cliconfig.CliConfig) *Factory {
f.dependencies.CliConfig = cliConfig
return f
}
type eksdInstallerClient struct {
*executables.Kubectl
}
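// WithEksdInstaller adds a build step that constructs the eks-d installer from kubectl and the file reader.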
func (f *Factory) WithEksdInstaller() *Factory {
f.WithKubectl().WithFileReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.EksdInstaller != nil {
return nil
}
var opts []eksd.InstallerOpt
if f.config.noTimeouts {
opts = append(opts, eksd.WithRetrier(retrier.NewWithNoTimeout()))
}
f.dependencies.EksdInstaller = eksd.NewEksdInstaller(
&eksdInstallerClient{
f.dependencies.Kubectl,
},
f.dependencies.FileReader,
opts...,
)
return nil
})
return f
}
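// WithEksdUpgrader adds a build step that constructs the eks-d upgrader from kubectl and the file reader.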
func (f *Factory) WithEksdUpgrader() *Factory {
f.WithKubectl().WithFileReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.EksdUpgrader != nil {
return nil
}
var opts []eksd.InstallerOpt
if f.config.noTimeouts {
opts = append(opts, eksd.WithRetrier(retrier.NewWithNoTimeout()))
}
f.dependencies.EksdUpgrader = eksd.NewUpgrader(
&eksdInstallerClient{
f.dependencies.Kubectl,
},
f.dependencies.FileReader,
opts...,
)
return nil
})
return f
}
// WithKubeProxyCLIUpgrader builds a KubeProxyCLIUpgrader.
func (f *Factory) WithKubeProxyCLIUpgrader() *Factory {
f.WithLogger().WithUnAuthKubeClient()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
var opts []clustermanager.KubeProxyCLIUpgraderOpt
if f.config.noTimeouts {
opts = append(opts, clustermanager.KubeProxyCLIUpgraderRetrier(*retrier.NewWithNoTimeout()))
}
f.dependencies.KubeProxyCLIUpgrader = clustermanager.NewKubeProxyCLIUpgrader(
f.dependencies.Logger,
f.dependencies.UnAuthKubeClient,
opts...,
)
return nil
})
return f
}
// WithValidatorClients builds KubeClients.
func (f *Factory) WithValidatorClients() *Factory {
f.WithKubectl().WithUnAuthKubeClient()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
f.dependencies.UnAuthKubectlClient = KubeClients{
Kubectl: f.dependencies.Kubectl,
UnAuthClient: f.dependencies.UnAuthKubeClient,
}
return nil
})
return f
}
// WithLogger sets up a logger to be injected into constructors. It uses the
// package-level logger from the logger package.
func (f *Factory) WithLogger() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
f.dependencies.Logger = logger.Get()
return nil
})
return f
}
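// WithGit adds a build step that builds the git tools for the given FluxConfig,
// validating the git remote and the git provider when they are configured.
// It is a no-op when fluxConfig is nil.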
func (f *Factory) WithGit(clusterConfig *v1alpha1.Cluster, fluxConfig *v1alpha1.FluxConfig) *Factory {
f.WithWriter()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.Git != nil {
return nil
}
if fluxConfig == nil {
return nil
}
tools, err := gitfactory.Build(ctx, clusterConfig, fluxConfig, f.dependencies.Writer)
if err != nil {
return fmt.Errorf("creating Git provider: %v", err)
}
if fluxConfig.Spec.Git != nil {
err = tools.Client.ValidateRemoteExists(ctx)
if err != nil {
return err
}
}
if tools.Provider != nil {
err = tools.Provider.Validate(ctx)
if err != nil {
return fmt.Errorf("validating provider: %v", err)
}
}
f.dependencies.Git = tools
return nil
})
return f
}
// WithGitOpsFlux builds a gitops flux.
func (f *Factory) WithGitOpsFlux(clusterConfig *v1alpha1.Cluster, fluxConfig *v1alpha1.FluxConfig, cliConfig *cliconfig.CliConfig) *Factory {
f.WithWriter().WithFlux().WithKubectl().WithGit(clusterConfig, fluxConfig)
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.GitOpsFlux != nil {
return nil
}
f.dependencies.GitOpsFlux = flux.NewFlux(f.dependencies.Flux, f.dependencies.Kubectl, f.dependencies.Git, cliConfig)
return nil
})
return f
}
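// WithPackageInstaller adds a build step that constructs the curated packages installer
// for the given cluster spec, packages location and management cluster kubeconfig.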
func (f *Factory) WithPackageInstaller(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory {
f.WithKubectl().WithPackageControllerClient(spec, kubeConfig).WithPackageClient()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.PackageInstaller != nil {
return nil
}
managementClusterName := getManagementClusterName(spec)
mgmtKubeConfig := kubeconfig.ResolveFilename(kubeConfig, managementClusterName)
f.dependencies.PackageInstaller = curatedpackages.NewInstaller(
f.dependencies.Kubectl,
f.dependencies.PackageClient,
f.dependencies.PackageControllerClient,
spec,
packagesLocation,
mgmtKubeConfig,
)
return nil
})
return f
}
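// WithPackageControllerClient adds a build step that constructs the curated packages
// controller client, wiring in the proxy configuration from the spec and the AWS
// credentials read from environment variables.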
func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig string) *Factory {
f.WithHelm(executables.WithInsecure()).WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.PackageControllerClient != nil || spec == nil {
return nil
}
managementClusterName := getManagementClusterName(spec)
mgmtKubeConfig := kubeconfig.ResolveFilename(kubeConfig, managementClusterName)
httpProxy, httpsProxy, noProxy := getProxyConfiguration(spec)
eksaAccessKeyID, eksaSecretKey, eksaRegion := os.Getenv(cliconfig.EksaAccessKeyIdEnv), os.Getenv(cliconfig.EksaSecretAccessKeyEnv), os.Getenv(cliconfig.EksaRegionEnv)
writer, err := filewriter.NewWriter(spec.Cluster.Name)
if err != nil {
return err
}
f.dependencies.PackageControllerClient = curatedpackages.NewPackageControllerClient(
f.dependencies.Helm,
f.dependencies.Kubectl,
spec.Cluster.Name,
mgmtKubeConfig,
&spec.VersionsBundle.PackageController.HelmChart,
f.registryMirror,
curatedpackages.WithEksaAccessKeyId(eksaAccessKeyID),
curatedpackages.WithEksaSecretAccessKey(eksaSecretKey),
curatedpackages.WithEksaRegion(eksaRegion),
curatedpackages.WithHTTPProxy(httpProxy),
curatedpackages.WithHTTPSProxy(httpsProxy),
curatedpackages.WithNoProxy(noProxy),
curatedpackages.WithManagementClusterName(managementClusterName),
curatedpackages.WithValuesFileWriter(writer),
curatedpackages.WithClusterSpec(spec),
)
return nil
})
return f
}
func (f *Factory) WithPackageClient() *Factory {
f.WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.PackageClient != nil {
return nil
}
f.dependencies.PackageClient = curatedpackages.NewPackageClient(
f.dependencies.Kubectl,
)
return nil
})
return f
}
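// WithCuratedPackagesRegistry adds a build step that constructs the curated packages bundle
// registry: a Helm-backed custom registry when registryName is set, otherwise the default
// registry backed by the manifest reader.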
func (f *Factory) WithCuratedPackagesRegistry(registryName, kubeVersion string, version version.Info) *Factory {
if registryName != "" {
f.WithHelm(executables.WithInsecure())
} else {
f.WithManifestReader()
}
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.BundleRegistry != nil {
return nil
}
if registryName != "" {
f.dependencies.BundleRegistry = curatedpackages.NewCustomRegistry(
f.dependencies.Helm,
registryName,
)
} else {
f.dependencies.BundleRegistry = curatedpackages.NewDefaultRegistry(
f.dependencies.ManifestReader,
kubeVersion,
version,
)
}
return nil
})
return f
}
func (f *Factory) WithDiagnosticBundleFactory() *Factory {
f.WithWriter().WithTroubleshoot().WithCollectorFactory().WithAnalyzerFactory().WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.DignosticCollectorFactory != nil {
return nil
}
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: f.dependencies.AnalyzerFactory,
Client: f.dependencies.Troubleshoot,
CollectorFactory: f.dependencies.CollectorFactory,
Kubectl: f.dependencies.Kubectl,
Writer: f.dependencies.Writer,
}
f.dependencies.DignosticCollectorFactory = diagnostics.NewFactory(opts)
return nil
})
return f
}
func (f *Factory) WithAnalyzerFactory() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.AnalyzerFactory != nil {
return nil
}
f.dependencies.AnalyzerFactory = diagnostics.NewAnalyzerFactory()
return nil
})
return f
}
func (f *Factory) WithDiagnosticCollectorImage(diagnosticCollectorImage string) *Factory {
f.diagnosticCollectorImage = diagnosticCollectorImage
return f
}
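// WithCollectorFactory adds a build step that constructs the diagnostics collector factory,
// using the default collectors unless a custom diagnostic collector image has been set.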
func (f *Factory) WithCollectorFactory() *Factory {
f.WithFileReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.CollectorFactory != nil {
return nil
}
if f.diagnosticCollectorImage == "" {
f.dependencies.CollectorFactory = diagnostics.NewDefaultCollectorFactory(f.dependencies.FileReader)
} else {
f.dependencies.CollectorFactory = diagnostics.NewCollectorFactory(f.diagnosticCollectorImage, f.dependencies.FileReader)
}
return nil
})
return f
}
func (f *Factory) WithCAPIManager() *Factory {
f.WithClusterctl()
f.WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.CAPIManager != nil {
return nil
}
f.dependencies.CAPIManager = clusterapi.NewManager(f.dependencies.Clusterctl, f.dependencies.Kubectl)
return nil
})
return f
}
func (f *Factory) WithFileReader() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.FileReader != nil {
return nil
}
f.dependencies.FileReader = files.NewReader(files.WithEKSAUserAgent("cli", version.Get().GitVersion))
return nil
})
return f
}
func (f *Factory) WithManifestReader() *Factory {
f.WithFileReader()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.ManifestReader != nil {
return nil
}
f.dependencies.ManifestReader = manifests.NewReader(f.dependencies.FileReader)
return nil
})
return f
}
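// WithUnAuthKubeClient adds a build step that constructs and initializes the unauthenticated
// Kubernetes client backed by kubectl.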
func (f *Factory) WithUnAuthKubeClient() *Factory {
f.WithKubectl()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.UnAuthKubeClient != nil {
return nil
}
f.dependencies.UnAuthKubeClient = kubernetes.NewUnAuthClient(f.dependencies.Kubectl)
if err := f.dependencies.UnAuthKubeClient.Init(); err != nil {
return fmt.Errorf("building unauth kube client: %v", err)
}
return nil
})
return f
}
func (f *Factory) WithVSphereValidator() *Factory {
f.WithGovc()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.VSphereValidator != nil {
return nil
}
vcb := govmomi.NewVMOMIClientBuilder()
v := vsphere.NewValidator(
f.dependencies.Govc,
vcb,
)
f.dependencies.VSphereValidator = v
return nil
})
return f
}
func (f *Factory) WithVSphereDefaulter() *Factory {
f.WithGovc()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.VSphereDefaulter != nil {
return nil
}
f.dependencies.VSphereDefaulter = vsphere.NewDefaulter(f.dependencies.Govc)
return nil
})
return f
}
// WithNutanixDefaulter adds a new NutanixDefaulter to the factory.
func (f *Factory) WithNutanixDefaulter() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.NutanixDefaulter != nil {
return nil
}
f.dependencies.NutanixDefaulter = nutanix.NewDefaulter()
return nil
})
return f
}
// WithNutanixValidator adds a new NutanixValidator to the factory.
func (f *Factory) WithNutanixValidator() *Factory {
f.WithNutanixClientCache()
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.NutanixValidator != nil {
return nil
}
skipVerifyTransport := http.DefaultTransport.(*http.Transport).Clone()
skipVerifyTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
v := nutanix.NewValidator(
f.dependencies.NutanixClientCache,
crypto.NewTlsValidator(),
&http.Client{Transport: skipVerifyTransport},
)
f.dependencies.NutanixValidator = v
return nil
})
return f
}
// WithNutanixClientCache adds a new NutanixClientCache to the factory.
func (f *Factory) WithNutanixClientCache() *Factory {
f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
if f.dependencies.NutanixClientCache != nil {
return nil
}
f.dependencies.NutanixClientCache = nutanix.NewClientCache()
return nil
})
return f
}
func getProxyConfiguration(clusterSpec *cluster.Spec) (httpProxy, httpsProxy string, noProxy []string) {
proxyConfiguration := clusterSpec.Cluster.Spec.ProxyConfiguration
if proxyConfiguration != nil {
return proxyConfiguration.HttpProxy, proxyConfiguration.HttpsProxy, proxyConfiguration.NoProxy
}
return "", "", nil
}
func getManagementClusterName(clusterSpec *cluster.Spec) string {
if clusterSpec.Cluster.Spec.ManagementCluster.Name != "" {
return clusterSpec.Cluster.Spec.ManagementCluster.Name
}
return clusterSpec.Cluster.Name
}
| 1,490 |
eks-anywhere | aws | Go | package dependencies_test
import (
"bytes"
"context"
"encoding/base64"
"testing"
"time"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/dependencies"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/version"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type factoryTest struct {
*WithT
clusterConfigFile string
clusterSpec *cluster.Spec
ctx context.Context
hardwareConfigFile string
tinkerbellBootstrapIP string
cliConfig config.CliConfig
}
type provider string
const (
vsphere provider = "vsphere"
tinkerbell provider = "tinkerbell"
nutanix provider = "nutanix"
snow provider = "snow"
)
func newTest(t *testing.T, p provider) *factoryTest {
var clusterConfigFile string
switch p {
case vsphere:
clusterConfigFile = "testdata/cluster_vsphere.yaml"
case tinkerbell:
clusterConfigFile = "testdata/cluster_tinkerbell.yaml"
case nutanix:
clusterConfigFile = "testdata/nutanix/cluster_nutanix.yaml"
case snow:
clusterConfigFile = "testdata/snow/cluster_snow.yaml"
default:
t.Fatalf("Not a valid provider: %v", p)
}
return &factoryTest{
WithT: NewGomegaWithT(t),
clusterConfigFile: clusterConfigFile,
clusterSpec: test.NewFullClusterSpec(t, clusterConfigFile),
ctx: context.Background(),
}
}
func TestFactoryBuildWithProvidervSphere(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Provider).NotTo(BeNil())
tt.Expect(deps.DockerClient).To(BeNil(), "it only builds deps for vsphere")
}
func TestFactoryBuildWithProviderTinkerbell(t *testing.T) {
tt := newTest(t, tinkerbell)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Provider).NotTo(BeNil())
tt.Expect(deps.Helm).NotTo(BeNil())
tt.Expect(deps.DockerClient).NotTo(BeNil())
}
func TestFactoryBuildWithProviderSnow(t *testing.T) {
tt := newTest(t, snow)
t.Setenv("EKSA_AWS_CREDENTIALS_FILE", "./testdata/snow/valid_credentials")
t.Setenv("EKSA_AWS_CA_BUNDLES_FILE", "./testdata/snow/valid_certificates")
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Provider).NotTo(BeNil())
tt.Expect(deps.SnowAwsClientRegistry).NotTo(BeNil())
tt.Expect(deps.SnowConfigManager).NotTo(BeNil())
}
func TestFactoryBuildWithProviderNutanix(t *testing.T) {
tests := []struct {
name string
clusterConfigFile string
expectError bool
}{
{
name: "nutanix provider valid config",
clusterConfigFile: "testdata/nutanix/cluster_nutanix.yaml",
},
{
name: "nutanix provider valid config with additional trust bundle",
clusterConfigFile: "testdata/nutanix/cluster_nutanix_with_trust_bundle.yaml",
},
}
t.Setenv(constants.EksaNutanixUsernameKey, "test")
t.Setenv(constants.EksaNutanixPasswordKey, "test")
for _, tc := range tests {
tt := newTest(t, nutanix)
tt.clusterConfigFile = tc.clusterConfigFile
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
WithNutanixValidator().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Provider).NotTo(BeNil())
tt.Expect(deps.NutanixClientCache).NotTo(BeNil())
}
}
func TestFactoryBuildWithInvalidProvider(t *testing.T) {
clusterConfigFile := "testdata/cluster_invalid_provider.yaml"
tt := &factoryTest{
WithT: NewGomegaWithT(t),
clusterConfigFile: clusterConfigFile,
clusterSpec: test.NewFullClusterSpec(t, clusterConfigFile),
ctx: context.Background(),
}
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
Build(context.Background())
tt.Expect(err).NotTo(BeNil())
tt.Expect(deps).To(BeNil())
}
func TestFactoryBuildWithClusterManager(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithCliConfig(&tt.cliConfig).
WithClusterManager(tt.clusterSpec.Cluster, nil).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.ClusterManager).NotTo(BeNil())
}
func TestFactoryBuildWithClusterManagerWithoutCliConfig(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithClusterManager(tt.clusterSpec.Cluster, nil).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.ClusterManager).NotTo(BeNil())
}
func TestFactoryBuildWithMultipleDependencies(t *testing.T) {
configString := test.ReadFile(t, "testdata/cloudstack_config_multiple_profiles.ini")
encodedConfig := base64.StdEncoding.EncodeToString([]byte(configString))
timeoutOpts := &dependencies.ClusterManagerTimeoutOptions{
NoTimeouts: true,
ControlPlaneWait: 10 * time.Minute,
}
t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, encodedConfig)
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithBootstrapper().
WithCliConfig(&tt.cliConfig).
WithClusterManager(tt.clusterSpec.Cluster, timeoutOpts).
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
WithGitOpsFlux(tt.clusterSpec.Cluster, tt.clusterSpec.FluxConfig, nil).
WithWriter().
WithEksdInstaller().
WithEksdUpgrader().
WithDiagnosticCollectorImage("public.ecr.aws/collector").
WithAnalyzerFactory().
WithCollectorFactory().
WithTroubleshoot().
WithCAPIManager().
WithManifestReader().
WithUnAuthKubeClient().
WithCloudStackValidatorRegistry(false).
WithVSphereDefaulter().
WithVSphereValidator().
WithCiliumTemplater().
WithIPValidator().
WithKubeProxyCLIUpgrader().
WithValidatorClients().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Bootstrapper).NotTo(BeNil())
tt.Expect(deps.ClusterManager).NotTo(BeNil())
tt.Expect(deps.Provider).NotTo(BeNil())
tt.Expect(deps.GitOpsFlux).NotTo(BeNil())
tt.Expect(deps.Writer).NotTo(BeNil())
tt.Expect(deps.EksdInstaller).NotTo(BeNil())
tt.Expect(deps.EksdUpgrader).NotTo(BeNil())
tt.Expect(deps.AnalyzerFactory).NotTo(BeNil())
tt.Expect(deps.CollectorFactory).NotTo(BeNil())
tt.Expect(deps.Troubleshoot).NotTo(BeNil())
tt.Expect(deps.CAPIManager).NotTo(BeNil())
tt.Expect(deps.ManifestReader).NotTo(BeNil())
tt.Expect(deps.UnAuthKubeClient).NotTo(BeNil())
tt.Expect(deps.VSphereDefaulter).NotTo(BeNil())
tt.Expect(deps.VSphereValidator).NotTo(BeNil())
tt.Expect(deps.CiliumTemplater).NotTo(BeNil())
tt.Expect(deps.IPValidator).NotTo(BeNil())
tt.Expect(deps.KubeProxyCLIUpgrader).NotTo(BeNil())
tt.Expect(deps.UnAuthKubectlClient).NotTo(BeNil())
}
func TestFactoryBuildWithProxyConfiguration(t *testing.T) {
tt := newTest(t, vsphere)
wantHttpsProxy := "FOO"
wantHttpProxy := "BAR"
wantNoProxy := "localhost,anotherhost"
env := map[string]string{
config.HttpsProxyKey: wantHttpsProxy,
config.HttpProxyKey: wantHttpProxy,
config.NoProxyKey: wantNoProxy,
}
for k, v := range env {
t.Setenv(k, v)
}
f := dependencies.NewFactory().WithProxyConfiguration()
tt.Expect(f.GetProxyConfiguration()).To(BeNil())
_, err := f.Build(context.Background())
pc := f.GetProxyConfiguration()
tt.Expect(err).To(BeNil())
tt.Expect(pc[config.HttpsProxyKey]).To(Equal(wantHttpsProxy))
tt.Expect(pc[config.HttpProxyKey]).To(Equal(wantHttpProxy))
tt.Expect(pc[config.NoProxyKey]).To(Equal(wantNoProxy))
}
func TestFactoryBuildWithRegistryMirror(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithRegistryMirror(
®istrymirror.RegistryMirror{
BaseRegistry: "1.2.3.4:443",
NamespacedRegistryMap: map[string]string{
constants.DefaultCoreEKSARegistry: "1.2.3.4:443/custom",
},
Auth: false,
}).
WithHelm(executables.WithInsecure()).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Helm).NotTo(BeNil())
}
func TestFactoryBuildWithPackageInstaller(t *testing.T) {
spec := &cluster.Spec{
Config: &cluster.Config{
Cluster: &anywherev1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "test-cluster",
},
},
},
VersionsBundle: &cluster.VersionsBundle{
VersionsBundle: &v1alpha1.VersionsBundle{
PackageController: v1alpha1.PackageBundle{
HelmChart: v1alpha1.Image{
URI: "test_registry/test/eks-anywhere-packages:v1",
Name: "test_chart",
},
},
},
},
}
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithHelm(executables.WithInsecure()).
WithKubectl().
WithPackageInstaller(spec, "/test/packages.yaml", "kubeconfig.kubeconfig").
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.PackageInstaller).NotTo(BeNil())
}
func TestFactoryBuildWithCuratedPackagesCustomRegistry(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithHelm(executables.WithInsecure()).
WithCuratedPackagesRegistry("test_host:8080", "1.22", version.Info{GitVersion: "1.19"}).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.BundleRegistry).NotTo(BeNil())
}
func TestFactoryBuildWithPackageClient(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithKubectl().
WithPackageClient().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.PackageClient).NotTo(BeNil())
}
func TestFactoryBuildWithPackageControllerClientNoProxy(t *testing.T) {
spec := &cluster.Spec{
Config: &cluster.Config{
Cluster: &anywherev1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "test-cluster",
},
Spec: anywherev1.ClusterSpec{
ManagementCluster: anywherev1.ManagementCluster{
Name: "mgmt-1",
},
},
},
},
VersionsBundle: &cluster.VersionsBundle{
VersionsBundle: &v1alpha1.VersionsBundle{
PackageController: v1alpha1.PackageBundle{
HelmChart: v1alpha1.Image{
URI: "test_registry/test/eks-anywhere-packages:v1",
Name: "test_chart",
},
},
},
},
}
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithHelm(executables.WithInsecure()).
WithKubectl().
WithPackageControllerClient(spec, "kubeconfig.kubeconfig").
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.PackageControllerClient).NotTo(BeNil())
}
func TestFactoryBuildWithPackageControllerClientProxy(t *testing.T) {
spec := &cluster.Spec{
Config: &cluster.Config{
Cluster: &anywherev1.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: "test-cluster",
},
Spec: anywherev1.ClusterSpec{
ProxyConfiguration: &anywherev1.ProxyConfiguration{
HttpProxy: "1.1.1.1",
HttpsProxy: "1.1.1.1",
NoProxy: []string{"1.1.1.1"},
},
},
},
},
VersionsBundle: &cluster.VersionsBundle{
VersionsBundle: &v1alpha1.VersionsBundle{
PackageController: v1alpha1.PackageBundle{
HelmChart: v1alpha1.Image{
URI: "test_registry/test/eks-anywhere-packages:v1",
Name: "test_chart",
},
},
},
},
}
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithHelm(executables.WithInsecure()).
WithKubectl().
WithPackageControllerClient(spec, "kubeconfig.kubeconfig").
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.PackageControllerClient).NotTo(BeNil())
}
func TestFactoryBuildWithCuratedPackagesDefaultRegistry(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithManifestReader().
WithCuratedPackagesRegistry("", "1.22", version.Info{GitVersion: "1.19"}).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.BundleRegistry).NotTo(BeNil())
}
func TestFactoryBuildWithCuratedPackagesCustomManifestImage(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithCustomBundles("testdata/cli_tools_bundle.yaml").
WithManifestReader().
WithCuratedPackagesRegistry("", "1.22", version.Info{GitVersion: "1.19"}).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.BundleRegistry).NotTo(BeNil())
}
func TestFactoryBuildWithCuratedPackagesCustomManifestImageNoOverrides(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithCustomBundles("").
WithManifestReader().
WithCuratedPackagesRegistry("", "1.22", version.Info{GitVersion: "1.19"}).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.BundleRegistry).NotTo(BeNil())
}
func TestFactoryBuildWithCuratedPackagesCustomManifestImageMissingBundle(t *testing.T) {
tt := newTest(t, vsphere)
_, err := dependencies.NewFactory().
WithCustomBundles("testdata/not_exist.yaml").
WithManifestReader().
WithCuratedPackagesRegistry("", "1.22", version.Info{GitVersion: "1.19"}).
Build(context.Background())
tt.Expect(err).NotTo(BeNil())
}
func TestFactoryBuildWithCuratedPackagesCustomManifestWithExistingExecConfig(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
UseExecutableImage("test-exec-image").
WithCustomBundles("testdata/not_exist.yaml").
WithManifestReader().
WithCuratedPackagesRegistry("", "1.22", version.Info{GitVersion: "1.19"}).
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.BundleRegistry).NotTo(BeNil())
}
func TestFactoryBuildWithExecutablesUsingDocker(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
UseExecutablesDockerClient(dummyDockerClient{}).
UseExecutableImage("myimage").
WithGovc().
WithHelm().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Govc).NotTo(BeNil())
tt.Expect(deps.Helm).NotTo(BeNil())
}
func TestFactoryBuildWithCNIInstallerCilium(t *testing.T) {
tt := newTest(t, vsphere)
factory := dependencies.NewFactory()
deps, err := factory.
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
Build(tt.ctx)
tt.Expect(err).To(BeNil())
deps, err = factory.
WithCNIInstaller(tt.clusterSpec, deps.Provider).
WithCNIInstaller(tt.clusterSpec, deps.Provider). // idempotency
Build(tt.ctx)
tt.Expect(err).To(BeNil())
tt.Expect(deps.CNIInstaller).NotTo(BeNil())
}
func TestFactoryBuildWithCNIInstallerKindnetd(t *testing.T) {
tt := newTest(t, vsphere)
tt.clusterSpec.Cluster.Spec.ClusterNetwork.CNIConfig = &anywherev1.CNIConfig{
Kindnetd: &anywherev1.KindnetdConfig{},
}
factory := dependencies.NewFactory()
deps, err := factory.
WithLocalExecutables().
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP).
Build(tt.ctx)
tt.Expect(err).To(BeNil())
deps, err = factory.
WithCNIInstaller(tt.clusterSpec, deps.Provider).
WithCNIInstaller(tt.clusterSpec, deps.Provider). // idempotency
Build(tt.ctx)
tt.Expect(err).To(BeNil())
tt.Expect(deps.CNIInstaller).NotTo(BeNil())
}
func TestFactoryBuildWithKubeProxyCLIUpgraderNoTimeout(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithNoTimeouts().
WithKubeProxyCLIUpgrader().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.KubeProxyCLIUpgrader).NotTo(BeNil())
}
func TestFactoryBuildWithAwsIamAuthNoTimeout(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithNoTimeouts().
WithAwsIamAuth().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.AwsIamAuth).NotTo(BeNil())
}
func TestFactoryBuildWithBootstrapperNoTimeout(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithNoTimeouts().
WithBootstrapper().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.Bootstrapper).NotTo(BeNil())
}
func TestFactoryBuildWithEksdUpgraderNoTimeout(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithNoTimeouts().
WithEksdUpgrader().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.EksdUpgrader).NotTo(BeNil())
}
func TestFactoryBuildWithEksdInstallerNoTimeout(t *testing.T) {
tt := newTest(t, vsphere)
deps, err := dependencies.NewFactory().
WithLocalExecutables().
WithNoTimeouts().
WithEksdInstaller().
Build(context.Background())
tt.Expect(err).To(BeNil())
tt.Expect(deps.EksdInstaller).NotTo(BeNil())
}
type dummyDockerClient struct{}
func (b dummyDockerClient) PullImage(ctx context.Context, image string) error {
return nil
}
func (b dummyDockerClient) Execute(ctx context.Context, args ...string) (stdout bytes.Buffer, err error) {
return bytes.Buffer{}, nil
}
func (b dummyDockerClient) Login(ctx context.Context, endpoint, username, password string) error {
return nil
}
| 610 |
eks-anywhere | aws | Go | package diagnostics
import (
"fmt"
"path"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/constants"
)
const (
logAnalysisAnalyzerPrefix = "log analysis:"
)
type analyzerFactory struct{}
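// NewAnalyzerFactory returns a factory for building troubleshoot analyzers.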
func NewAnalyzerFactory() *analyzerFactory {
return &analyzerFactory{}
}
func (a *analyzerFactory) DefaultAnalyzers() []*Analyze {
var analyzers []*Analyze
return append(analyzers, a.defaultDeploymentAnalyzers()...)
}
func (a *analyzerFactory) defaultDeploymentAnalyzers() []*Analyze {
d := []eksaDeployment{
{
Name: "coredns",
Namespace: constants.KubeSystemNamespace,
ExpectedReplicas: 2,
},
}
return a.generateDeploymentAnalyzers(d)
}
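// ManagementClusterAnalyzers returns the deployment and CRD analyzers for a management cluster.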
func (a *analyzerFactory) ManagementClusterAnalyzers() []*Analyze {
var analyzers []*Analyze
analyzers = append(analyzers, a.managementClusterDeploymentAnalyzers()...)
return append(analyzers, a.managementClusterCrdAnalyzers()...)
}
func (a *analyzerFactory) managementClusterCrdAnalyzers() []*Analyze {
crds := []string{
fmt.Sprintf("clusters.%s", v1alpha1.GroupVersion.Group),
fmt.Sprintf("bundles.%s", v1alpha1.GroupVersion.Group),
}
return a.generateCrdAnalyzers(crds)
}
func (a *analyzerFactory) PackageAnalyzers() []*Analyze {
var analyzers []*Analyze
analyzers = append(analyzers, a.packageDeploymentAnalyzers()...)
return append(analyzers, a.packageCrdAnalyzers()...)
}
func (a *analyzerFactory) packageCrdAnalyzers() []*Analyze {
crds := []string{
"packagebundlecontrollers.packages.eks.amazonaws.com",
"packagebundles.packages.eks.amazonaws.com",
"packagecontrollers.packages.eks.amazonaws.com",
"packages.packages.eks.amazonaws.com",
}
return a.generateCrdAnalyzers(crds)
}
func (a *analyzerFactory) packageDeploymentAnalyzers() []*Analyze {
d := []eksaDeployment{
{
Name: "eks-anywhere-packages",
Namespace: constants.EksaPackagesName,
ExpectedReplicas: 1,
},
}
return a.generateDeploymentAnalyzers(d)
}
func (a *analyzerFactory) managementClusterDeploymentAnalyzers() []*Analyze {
d := []eksaDeployment{
{
Name: "capt-controller-manager",
Namespace: constants.CaptSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "capv-controller-manager",
Namespace: constants.CapvSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "capc-controller-manager",
Namespace: constants.CapcSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "capx-controller-manager",
Namespace: constants.CapxSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "cert-manager-webhook",
Namespace: constants.CertManagerNamespace,
ExpectedReplicas: 1,
}, {
Name: "cert-manager-cainjector",
Namespace: constants.CertManagerNamespace,
ExpectedReplicas: 1,
}, {
Name: "cert-manager",
Namespace: constants.CertManagerNamespace,
ExpectedReplicas: 1,
}, {
Name: "capi-controller-manager",
Namespace: constants.CapiSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "capi-kubeadm-control-plane-controller-manager",
Namespace: constants.CapiKubeadmControlPlaneSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "capi-kubeadm-control-plane-controller-manager",
Namespace: constants.CapiKubeadmControlPlaneSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "capi-kubeadm-bootstrap-controller-manager",
Namespace: constants.CapiKubeadmBootstrapSystemNamespace,
ExpectedReplicas: 1,
},
}
return a.generateDeploymentAnalyzers(d)
}
func (a *analyzerFactory) EksaGitopsAnalyzers() []*Analyze {
crds := []string{
fmt.Sprintf("gitopsconfigs.%s", v1alpha1.GroupVersion.Group),
}
return a.generateCrdAnalyzers(crds)
}
func (a *analyzerFactory) EksaOidcAnalyzers() []*Analyze {
crds := []string{
fmt.Sprintf("oidcconfigs.%s", v1alpha1.GroupVersion.Group),
}
return a.generateCrdAnalyzers(crds)
}
func (a *analyzerFactory) EksaExternalEtcdAnalyzers() []*Analyze {
deployments := []eksaDeployment{
{
Name: "etcdadm-controller-controller-manager",
Namespace: constants.EtcdAdmControllerSystemNamespace,
ExpectedReplicas: 1,
}, {
Name: "etcdadm-bootstrap-provider-controller-manager",
Namespace: constants.EtcdAdmBootstrapProviderSystemNamespace,
ExpectedReplicas: 1,
},
}
return a.generateDeploymentAnalyzers(deployments)
}
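// DataCenterConfigAnalyzers returns the analyzers specific to the given datacenter kind,
// or nil for an unknown kind.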
func (a *analyzerFactory) DataCenterConfigAnalyzers(datacenter v1alpha1.Ref) []*Analyze {
switch datacenter.Kind {
case v1alpha1.VSphereDatacenterKind:
return a.eksaVsphereAnalyzers()
case v1alpha1.DockerDatacenterKind:
return a.eksaDockerAnalyzers()
case v1alpha1.CloudStackDatacenterKind:
return a.eksaCloudstackAnalyzers()
case v1alpha1.SnowDatacenterKind:
return a.eksaSnowAnalyzers()
case v1alpha1.NutanixDatacenterKind:
return a.eksaNutanixAnalyzers()
default:
return nil
}
}
func (a *analyzerFactory) eksaVsphereAnalyzers() []*Analyze {
var analyzers []*Analyze
crds := []string{
fmt.Sprintf("vspheredatacenterconfigs.%s", v1alpha1.GroupVersion.Group),
fmt.Sprintf("vspheremachineconfigs.%s", v1alpha1.GroupVersion.Group),
}
analyzers = append(analyzers, a.generateCrdAnalyzers(crds)...)
analyzers = append(analyzers, a.vsphereDiagnosticAnalyzers()...)
return analyzers
}
func (a *analyzerFactory) eksaCloudstackAnalyzers() []*Analyze {
crds := []string{
fmt.Sprintf("cloudstackdatacenterconfigs.%s", v1alpha1.GroupVersion.Group),
fmt.Sprintf("cloudstackmachineconfigs.%s", v1alpha1.GroupVersion.Group),
}
analyzers := a.generateCrdAnalyzers(crds)
return append(analyzers, a.validControlPlaneIPAnalyzer())
}
func (a *analyzerFactory) eksaSnowAnalyzers() []*Analyze {
crds := []string{
fmt.Sprintf("snowdatacenterconfigs.%s", v1alpha1.GroupVersion.Group),
fmt.Sprintf("snowmachineconfigs.%s", v1alpha1.GroupVersion.Group),
}
analyzers := a.generateCrdAnalyzers(crds)
return append(analyzers, a.validControlPlaneIPAnalyzer())
}
func (a *analyzerFactory) eksaDockerAnalyzers() []*Analyze {
	var analyzers []*Analyze
crds := []string{
fmt.Sprintf("dockerdatacenterconfigs.%s", v1alpha1.GroupVersion.Group),
}
deployments := []eksaDeployment{
{
Name: "local-path-provisioner",
Namespace: constants.LocalPathStorageNamespace,
ExpectedReplicas: 1,
},
}
	analyzers = append(analyzers, a.generateCrdAnalyzers(crds)...)
	return append(analyzers, a.generateDeploymentAnalyzers(deployments)...)
}
func (a *analyzerFactory) eksaNutanixAnalyzers() []*Analyze {
crds := []string{
fmt.Sprintf("nutanixdatacenterconfigs.%s", v1alpha1.GroupVersion.Group),
fmt.Sprintf("nutanixmachineconfigs.%s", v1alpha1.GroupVersion.Group),
}
analyzers := a.generateCrdAnalyzers(crds)
return append(analyzers, a.validControlPlaneIPAnalyzer())
}
// EksaLogTextAnalyzers, given a slice of Collectors, checks which namespaced log collectors are present
// and returns the log analyzers associated with those namespaces in the namespaceLogTextAnalyzersMap.
func (a *analyzerFactory) EksaLogTextAnalyzers(collectors []*Collect) []*Analyze {
var analyzers []*Analyze
analyzersMap := a.namespaceLogTextAnalyzersMap()
for _, collector := range collectors {
if collector.Logs != nil {
analyzer, ok := analyzersMap[collector.Logs.Namespace]
if ok {
analyzers = append(analyzers, analyzer...)
}
}
}
return analyzers
}
// namespaceLogTextAnalyzersMap is used to associate log text analyzers with the logs collected from a specific namespace.
// The key of the analyzers map is the namespace name, and the value is the list of associated log text analyzers.
func (a *analyzerFactory) namespaceLogTextAnalyzersMap() map[string][]*Analyze {
return map[string][]*Analyze{
constants.CapiKubeadmControlPlaneSystemNamespace: a.capiKubeadmControlPlaneSystemLogAnalyzers(),
}
}
func (a *analyzerFactory) capiKubeadmControlPlaneSystemLogAnalyzers() []*Analyze {
capiCpManagerPod := "capi-kubeadm-control-plane-controller-manager-*"
capiCpManagerContainerLogFile := capiCpManagerPod + ".log"
fullManagerPodLogPath := path.Join(logpath(constants.CapiKubeadmControlPlaneSystemNamespace), capiCpManagerContainerLogFile)
return []*Analyze{
{
TextAnalyze: &textAnalyze{
analyzeMeta: analyzeMeta{
CheckName: fmt.Sprintf("%s: API server pod missing. Log: %s", logAnalysisAnalyzerPrefix, fullManagerPodLogPath),
},
FileName: fullManagerPodLogPath,
RegexPattern: `machine (.*?) reports APIServerPodHealthy condition is false \(Error, Pod kube-apiserver-(.*?) is missing\)`,
Outcomes: []*outcome{
{
Fail: &singleOutcome{
When: "true",
Message: fmt.Sprintf("Node failed to launch correctly; API server pod is missing. See %s", fullManagerPodLogPath),
},
},
{
Pass: &singleOutcome{
When: "false",
Message: "API server pods launched correctly",
},
},
},
},
},
}
}
type eksaDeployment struct {
Name string
Namespace string
ExpectedReplicas int
}
func (a *analyzerFactory) generateDeploymentAnalyzers(deployments []eksaDeployment) []*Analyze {
var deploymentAnalyzers []*Analyze
for _, d := range deployments {
deploymentAnalyzers = append(deploymentAnalyzers, a.deploymentAnalyzer(d))
}
return deploymentAnalyzers
}
func (a *analyzerFactory) deploymentAnalyzer(deployment eksaDeployment) *Analyze {
return &Analyze{
DeploymentStatus: &deploymentStatus{
Name: deployment.Name,
Namespace: deployment.Namespace,
Outcomes: []*outcome{
{
Fail: &singleOutcome{
When: fmt.Sprintf("< %d", deployment.ExpectedReplicas),
Message: fmt.Sprintf("%s is not ready.", deployment.Name),
},
}, {
Pass: &singleOutcome{
Message: fmt.Sprintf("%s is running.", deployment.Name),
},
},
},
},
}
}
func (a *analyzerFactory) generateCrdAnalyzers(crds []string) []*Analyze {
var crdAnalyzers []*Analyze
for _, crd := range crds {
crdAnalyzers = append(crdAnalyzers, a.crdAnalyzer(crd))
}
return crdAnalyzers
}
func (a *analyzerFactory) crdAnalyzer(crdName string) *Analyze {
return &Analyze{
CustomResourceDefinition: &customResourceDefinition{
analyzeMeta: analyzeMeta{
CheckName: crdName,
},
Outcomes: []*outcome{
{
Fail: &singleOutcome{
When: "< 1",
Message: fmt.Sprintf("%s is not present on cluster", crdName),
},
},
{
Pass: &singleOutcome{
Message: fmt.Sprintf("%s is present on the cluster", crdName),
},
},
},
CustomResourceDefinitionName: crdName,
},
}
}
// vsphereDiagnosticAnalyzers returns diagnostic analyzers that analyze the condition of a vSphere cluster.
func (a *analyzerFactory) vsphereDiagnosticAnalyzers() []*Analyze {
return []*Analyze{a.validControlPlaneIPAnalyzer(), a.vcenterSessionValidatePermissionAnalyzer()}
}
// validControlPlaneIPAnalyzer analyzes whether a valid control plane IP is used to connect
// to the API server.
func (a *analyzerFactory) validControlPlaneIPAnalyzer() *Analyze {
runPingPod := "ping-host-ip"
runPingPodLog := fmt.Sprintf("%s.log", runPingPod)
fullRunPingPodLogPath := path.Join(runPingPod, runPingPodLog)
return &Analyze{
TextAnalyze: &textAnalyze{
analyzeMeta: analyzeMeta{
CheckName: fmt.Sprintf("%s: Destination Host Unreachable. Log: %s", logAnalysisAnalyzerPrefix, fullRunPingPodLogPath),
},
FileName: fullRunPingPodLogPath,
RegexPattern: `exit code: 0`,
Outcomes: []*outcome{
{
Fail: &singleOutcome{
When: "false",
Message: fmt.Sprintf("The control plane endpoint host is unavailable. See %s", fullRunPingPodLogPath),
},
},
{
Pass: &singleOutcome{
When: "true",
Message: "Control plane IP verified.",
},
},
},
},
}
}
// vcenterSessionValidatePermissionAnalyzer analyzes whether the vCenter user has the Sessions.ValidateSession permission, which CAPV
// needs to look up existing valid sessions and reuse them instead of having to create new ones.
func (a *analyzerFactory) vcenterSessionValidatePermissionAnalyzer() *Analyze {
capvManagerPod := "capv-controller-manager-*"
capvManagerContainerLogFile := capvManagerPod + ".log"
fullManagerPodLogPath := path.Join(logpath(constants.CapvSystemNamespace), capvManagerContainerLogFile)
return &Analyze{
TextAnalyze: &textAnalyze{
analyzeMeta: analyzeMeta{
CheckName: fmt.Sprintf("%s: Session Validate permission missing. Log: %s", logAnalysisAnalyzerPrefix, fullManagerPodLogPath),
},
FileName: fullManagerPodLogPath,
RegexPattern: `session "msg"="error checking if session is active" "error"="ServerFaultCode: Permission to perform this operation was denied."`,
Outcomes: []*outcome{
{
Fail: &singleOutcome{
When: "true",
Message: fmt.Sprintf("VCenter user doesn't have Sessions.ValidateSession permission. See %s", fullManagerPodLogPath),
},
},
{
Pass: &singleOutcome{
When: "false",
Message: "VCenter user has Sessions.ValidateSession permission.",
},
},
},
},
}
}
// vmsAccessAnalyzer analyzes whether VMs have access to the API server of the vSphere cluster.
// It is not used yet, but it will be once the workflows are updated to support this use case.
func (a *analyzerFactory) vmsAccessAnalyzer() *Analyze { //nolint:unused
runBashPod := "check-cloud-controller"
runBashPodLog := fmt.Sprintf("%s.log", runBashPod)
vSphereCloudControllerPodLogPath := path.Join(runBashPod, runBashPodLog)
return &Analyze{
TextAnalyze: &textAnalyze{
analyzeMeta: analyzeMeta{
CheckName: fmt.Sprintf("%s: Virtual Machine has no access to vSphere API server. Logs: %s", logAnalysisAnalyzerPrefix, vSphereCloudControllerPodLogPath),
},
FileName: vSphereCloudControllerPodLogPath,
RegexPattern: `Failed to create new client. err: Post (.*) dial tcp (.*) connect: connection timed out\n(.*)Failed to create govmomi client. err: Post (.*) dial tcp (.*) connect: connection timed out`,
Outcomes: []*outcome{
{
Fail: &singleOutcome{
When: "true",
Message: fmt.Sprintf("Failed to create client, Virtural Machines have no access to vSphere API server. See the cloud controller log in control plane node: %s", vSphereCloudControllerPodLogPath),
},
},
{
Pass: &singleOutcome{
When: "false",
Message: fmt.Sprintf("Virtual Machines have access to vSphere API server. See %s \nPlease ignore the result when this analyzer is running on bootstrap cluster", vSphereCloudControllerPodLogPath),
},
},
},
},
}
}
| 453 |
eks-anywhere | aws | Go | package diagnostics_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/diagnostics"
)
func TestManagementClusterAnalyzers(t *testing.T) {
g := NewGomegaWithT(t)
factory := diagnostics.NewAnalyzerFactory()
analyzers := factory.ManagementClusterAnalyzers()
g.Expect(analyzers).To(HaveLen(13), "ManagementClusterAnalyzers() mismatch between desired analyzers and actual")
g.Expect(getDeploymentStatusAnalyzer(analyzers, "capc-controller-manager")).ToNot(BeNil(), "capc controller manager analyzer should be present")
g.Expect(getDeploymentStatusAnalyzer(analyzers, "capv-controller-manager")).ToNot(BeNil(), "capv controller manager analyzer should be present")
g.Expect(getDeploymentStatusAnalyzer(analyzers, "capt-controller-manager")).ToNot(BeNil(), "capt controller manager analyzer should be present")
g.Expect(getDeploymentStatusAnalyzer(analyzers, "capx-controller-manager")).ToNot(BeNil(), "capx controller manager analyzer should be present")
g.Expect(analyzers[11].CustomResourceDefinition.CheckName).To(Equal("clusters.anywhere.eks.amazonaws.com"))
g.Expect(analyzers[12].CustomResourceDefinition.CheckName).To(Equal("bundles.anywhere.eks.amazonaws.com"))
}
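// getDeploymentStatusAnalyzer returns the deployment status analyzer with the given name, or nil if none is found.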
func getDeploymentStatusAnalyzer(analyzers []*diagnostics.Analyze, name string) *diagnostics.Analyze {
for _, analyzer := range analyzers {
if analyzer.DeploymentStatus != nil && analyzer.DeploymentStatus.Name == name {
return analyzer
}
}
return nil
}
func TestEksaLogTextAnalyzers(t *testing.T) {
collectorFactory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
collectors := collectorFactory.DefaultCollectors()
collectors = append(collectors, collectorFactory.ManagementClusterCollectors()...)
analyzerFactory := diagnostics.NewAnalyzerFactory()
expectedAnalyzers := analyzerFactory.EksaLogTextAnalyzers(collectors)
for _, analyzer := range expectedAnalyzers {
if analyzer == nil {
t.Errorf("EksaLogTextAnalyzers failed: returned a nil analyzer")
}
}
}
func TestVsphereDataCenterConfigAnalyzers(t *testing.T) {
g := NewGomegaWithT(t)
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.VSphereDatacenterKind}
analyzerFactory := diagnostics.NewAnalyzerFactory()
analyzers := analyzerFactory.DataCenterConfigAnalyzers(datacenter)
g.Expect(analyzers).To(HaveLen(4), "DataCenterConfigAnalyzers() mismatch between desired analyzers and actual")
g.Expect(analyzers[0].CustomResourceDefinition.CustomResourceDefinitionName).To(Equal("vspheredatacenterconfigs.anywhere.eks.amazonaws.com"),
"vSphere generateCrdAnalyzers() mismatch between desired datacenter config group version and actual")
g.Expect(analyzers[1].CustomResourceDefinition.CustomResourceDefinitionName).To(Equal("vspheremachineconfigs.anywhere.eks.amazonaws.com"),
"vSphere generateCrdAnalyzers() mismatch between desired machine config group version and actual")
g.Expect(analyzers[2].TextAnalyze.RegexPattern).To(Equal("exit code: 0"),
"validControlPlaneIPAnalyzer() mismatch between desired regexPattern and actual")
g.Expect(analyzers[3].TextAnalyze.RegexPattern).To(Equal("session \"msg\"=\"error checking if session is active\" \"error\"=\"ServerFaultCode: Permission to perform this operation was denied.\""),
"vcenterSessionValidatePermissionAnalyzer() mismatch between desired regexPattern and actual")
}
func TestDockerDataCenterConfigAnalyzers(t *testing.T) {
g := NewGomegaWithT(t)
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.DockerDatacenterKind}
analyzerFactory := diagnostics.NewAnalyzerFactory()
analyzers := analyzerFactory.DataCenterConfigAnalyzers(datacenter)
g.Expect(analyzers).To(HaveLen(2), "DataCenterConfigAnalyzers() mismatch between desired analyzers and actual")
}
func TestCloudStackDataCenterConfigAnalyzers(t *testing.T) {
g := NewGomegaWithT(t)
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.CloudStackDatacenterKind}
analyzerFactory := diagnostics.NewAnalyzerFactory()
analyzers := analyzerFactory.DataCenterConfigAnalyzers(datacenter)
g.Expect(analyzers).To(HaveLen(3), "DataCenterConfigAnalyzers() mismatch between desired analyzers and actual")
}
func TestNutanixDataCenterConfigAnalyzers(t *testing.T) {
g := NewGomegaWithT(t)
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.NutanixDatacenterKind}
analyzerFactory := diagnostics.NewAnalyzerFactory()
analyzers := analyzerFactory.DataCenterConfigAnalyzers(datacenter)
g.Expect(analyzers).To(HaveLen(3), "DataCenterConfigAnalyzers() mismatch between desired analyzers and actual")
g.Expect(analyzers[0].CustomResourceDefinition.CustomResourceDefinitionName).To(Equal("nutanixdatacenterconfigs.anywhere.eks.amazonaws.com"),
"Nutanix generateCrdAnalyzers() mismatch between desired datacenter config group version and actual")
g.Expect(analyzers[1].CustomResourceDefinition.CustomResourceDefinitionName).To(Equal("nutanixmachineconfigs.anywhere.eks.amazonaws.com"),
"Nutanix generateCrdAnalyzers() mismatch between desired machine config group version and actual")
g.Expect(analyzers[2].TextAnalyze.RegexPattern).To(Equal("exit code: 0"),
"validControlPlaneIPAnalyzer() mismatch between desired regexPattern and actual")
}
func TestSnowAnalyzers(t *testing.T) {
g := NewGomegaWithT(t)
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.SnowDatacenterKind}
analyzerFactory := diagnostics.NewAnalyzerFactory()
analyzers := analyzerFactory.DataCenterConfigAnalyzers(datacenter)
g.Expect(analyzers).To(HaveLen(3), "DataCenterConfigAnalyzers() mismatch between desired analyzers and actual")
}
| 102 |
eks-anywhere | aws | Go | package diagnostics
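// Analyze represents a single analyzer entry in the generated support bundle spec; the constructors in this package set exactly one of its fields.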
type Analyze struct {
CustomResourceDefinition *customResourceDefinition `json:"customResourceDefinition,omitempty"`
Secret *analyzeSecret `json:"secret,omitempty"`
ImagePullSecret *imagePullSecret `json:"imagePullSecret,omitempty"`
DeploymentStatus *deploymentStatus `json:"deploymentStatus,omitempty"`
TextAnalyze *textAnalyze `json:"textAnalyze,omitempty"`
}
type customResourceDefinition struct {
analyzeMeta `json:",inline"`
Outcomes []*outcome `json:"outcomes"`
CustomResourceDefinitionName string `json:"customResourceDefinitionName"`
}
type analyzeSecret struct {
analyzeMeta `json:",inline"`
Outcomes []*outcome `json:"outcomes"`
SecretName string `json:"secretName"`
Namespace string `json:"namespace"`
Key string `json:"key,omitempty"`
}
type imagePullSecret struct {
analyzeMeta `json:",inline"`
Outcomes []*outcome `json:"outcomes"`
RegistryName string `json:"registryName"`
}
type deploymentStatus struct {
analyzeMeta `json:",inline"`
Outcomes []*outcome `json:"outcomes"`
Namespace string `json:"namespace"`
Name string `json:"name"`
}
type textAnalyze struct {
analyzeMeta `json:",inline"`
CollectorName string `json:"collectorName,omitempty"`
FileName string `json:"fileName,omitempty"`
RegexPattern string `json:"regex,omitempty"`
RegexGroups string `json:"regexGroups,omitempty"`
Outcomes []*outcome `json:"outcomes"`
}
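// analyzeMeta holds fields shared by all analyzer types.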
type analyzeMeta struct {
CheckName string `json:"checkName,omitempty"`
Exclude bool `json:"exclude,omitempty"`
}
| 51 |
eks-anywhere | aws | Go | package diagnostics
import (
"fmt"
"path/filepath"
"time"
v1 "k8s.io/api/core/v1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/providers"
)
// FileReader reads files from local disk or HTTP URLs.
type FileReader interface {
ReadFile(url string) ([]byte, error)
}
// EKSACollectorFactory generates support-bundle collectors for eks-a clusters.
type EKSACollectorFactory struct {
DiagnosticCollectorImage string
reader FileReader
}
// NewCollectorFactory builds a collector factory.
func NewCollectorFactory(diagnosticCollectorImage string, reader FileReader) *EKSACollectorFactory {
return &EKSACollectorFactory{
DiagnosticCollectorImage: diagnosticCollectorImage,
reader: reader,
}
}
// NewDefaultCollectorFactory builds a collector factory that will use the default
// diagnostic collector image.
func NewDefaultCollectorFactory(reader FileReader) *EKSACollectorFactory {
return NewCollectorFactory("", reader)
}
// DefaultCollectors returns the collectors that apply to all clusters.
func (c *EKSACollectorFactory) DefaultCollectors() []*Collect {
collectors := []*Collect{
{
ClusterInfo: &clusterInfo{},
},
{
ClusterResources: &clusterResources{},
},
{
Secret: &secret{
Namespace: "eksa-system",
SecretName: "eksa-license",
IncludeValue: true,
Key: "license",
},
},
}
collectors = append(collectors, c.defaultLogCollectors()...)
return collectors
}
// EksaHostCollectors returns the collectors that interact with the Kubernetes node machine hosts.
func (c *EKSACollectorFactory) EksaHostCollectors(machineConfigs []providers.MachineConfig) []*Collect {
var collectors []*Collect
collectorsMap := c.getCollectorsMap()
// we don't want to duplicate the collectors if multiple machine configs have the same OS family
osFamiliesSeen := map[v1alpha1.OSFamily]bool{}
for _, config := range machineConfigs {
if _, seen := osFamiliesSeen[config.OSFamily()]; !seen {
collectors = append(collectors, collectorsMap[config.OSFamily()]...)
osFamiliesSeen[config.OSFamily()] = true
}
}
return collectors
}
// DataCenterConfigCollectors returns the collectors for the provider datacenter config in the cluster spec.
func (c *EKSACollectorFactory) DataCenterConfigCollectors(datacenter v1alpha1.Ref, spec *cluster.Spec) []*Collect {
switch datacenter.Kind {
case v1alpha1.VSphereDatacenterKind:
return c.eksaVsphereCollectors(spec)
case v1alpha1.DockerDatacenterKind:
return c.eksaDockerCollectors()
case v1alpha1.CloudStackDatacenterKind:
return c.eksaCloudstackCollectors()
case v1alpha1.TinkerbellDatacenterKind:
return c.eksaTinkerbellCollectors()
case v1alpha1.SnowDatacenterKind:
return c.eksaSnowCollectors()
case v1alpha1.NutanixDatacenterKind:
return c.eksaNutanixCollectors()
default:
return nil
}
}
func (c *EKSACollectorFactory) eksaNutanixCollectors() []*Collect {
nutanixLogs := []*Collect{
{
Logs: &logs{
Namespace: constants.CapxSystemNamespace,
Name: logpath(constants.CapxSystemNamespace),
},
},
}
return append(nutanixLogs, c.nutanixCrdCollectors()...)
}
func (c *EKSACollectorFactory) eksaSnowCollectors() []*Collect {
snowLogs := []*Collect{
{
Logs: &logs{
Namespace: constants.CapasSystemNamespace,
Name: logpath(constants.CapasSystemNamespace),
},
},
}
return append(snowLogs, c.snowCrdCollectors()...)
}
func (c *EKSACollectorFactory) eksaTinkerbellCollectors() []*Collect {
tinkerbellLogs := []*Collect{
{
Logs: &logs{
Namespace: constants.CaptSystemNamespace,
Name: logpath(constants.CaptSystemNamespace),
},
},
}
return append(tinkerbellLogs, c.tinkerbellCrdCollectors()...)
}
func (c *EKSACollectorFactory) eksaVsphereCollectors(spec *cluster.Spec) []*Collect {
var collectors []*Collect
vsphereLogs := []*Collect{
{
Logs: &logs{
Namespace: constants.CapvSystemNamespace,
Name: logpath(constants.CapvSystemNamespace),
},
},
}
collectors = append(collectors, vsphereLogs...)
collectors = append(collectors, c.vsphereCrdCollectors()...)
collectors = append(collectors, c.apiServerCollectors(spec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host)...)
collectors = append(collectors, c.vmsAccessCollector(spec.Cluster.Spec.ControlPlaneConfiguration))
return collectors
}
func (c *EKSACollectorFactory) eksaCloudstackCollectors() []*Collect {
cloudstackLogs := []*Collect{
{
Logs: &logs{
Namespace: constants.CapcSystemNamespace,
Name: logpath(constants.CapcSystemNamespace),
},
},
}
return append(cloudstackLogs, c.cloudstackCrdCollectors()...)
}
func (c *EKSACollectorFactory) eksaDockerCollectors() []*Collect {
return []*Collect{
{
Logs: &logs{
Namespace: constants.CapdSystemNamespace,
Name: logpath(constants.CapdSystemNamespace),
},
},
}
}
// ManagementClusterCollectors returns the collectors that only apply to management clusters.
func (c *EKSACollectorFactory) ManagementClusterCollectors() []*Collect {
var collectors []*Collect
collectors = append(collectors, c.managementClusterCrdCollectors()...)
collectors = append(collectors, c.managementClusterLogCollectors()...)
return collectors
}
// PackagesCollectors returns the collectors that read information for curated packages.
func (c *EKSACollectorFactory) PackagesCollectors() []*Collect {
var collectors []*Collect
collectors = append(collectors, c.packagesCrdCollectors()...)
collectors = append(collectors, c.packagesLogCollectors()...)
return collectors
}
// FileCollectors returns the collectors that interact with files.
func (c *EKSACollectorFactory) FileCollectors(paths []string) []*Collect {
collectors := []*Collect{}
for _, path := range paths {
content, err := c.reader.ReadFile(path)
if err != nil {
content = []byte(fmt.Sprintf("Failed to retrieve file %s for collection: %s", path, err))
}
collectors = append(collectors, &Collect{
Data: &data{
Data: string(content),
Name: filepath.Base(path),
},
})
}
return collectors
}
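// getCollectorsMap maps each supported OS family to its host-level collectors.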
func (c *EKSACollectorFactory) getCollectorsMap() map[v1alpha1.OSFamily][]*Collect {
return map[v1alpha1.OSFamily][]*Collect{
v1alpha1.Ubuntu: c.ubuntuHostCollectors(),
v1alpha1.Bottlerocket: c.bottleRocketHostCollectors(),
}
}
func (c *EKSACollectorFactory) bottleRocketHostCollectors() []*Collect {
return []*Collect{}
}
func (c *EKSACollectorFactory) ubuntuHostCollectors() []*Collect {
return []*Collect{
{
CopyFromHost: ©FromHost{
Name: hostlogPath("cloud-init"),
Namespace: constants.EksaDiagnosticsNamespace,
Image: c.DiagnosticCollectorImage,
HostPath: "/var/log/cloud-init.log",
},
},
{
CopyFromHost: ©FromHost{
Name: hostlogPath("cloud-init-output"),
Namespace: constants.EksaDiagnosticsNamespace,
Image: c.DiagnosticCollectorImage,
HostPath: "/var/log/cloud-init-output.log",
},
},
{
CopyFromHost: ©FromHost{
Name: hostlogPath("syslog"),
Namespace: constants.EksaDiagnosticsNamespace,
Image: c.DiagnosticCollectorImage,
HostPath: "/var/log/syslog",
Timeout: time.Minute.String(),
},
},
}
}
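// defaultLogCollectors returns log collectors for the namespaces present on every cluster.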
func (c *EKSACollectorFactory) defaultLogCollectors() []*Collect {
return []*Collect{
{
Logs: &logs{
Namespace: constants.EksaSystemNamespace,
Name: logpath(constants.EksaSystemNamespace),
},
},
{
Logs: &logs{
Namespace: constants.DefaultNamespace,
Name: logpath(constants.DefaultNamespace),
},
},
{
Logs: &logs{
Namespace: constants.KubeNodeLeaseNamespace,
Name: logpath(constants.KubeNodeLeaseNamespace),
},
},
{
Logs: &logs{
Namespace: constants.KubePublicNamespace,
Name: logpath(constants.KubePublicNamespace),
},
},
{
Logs: &logs{
Namespace: constants.KubeSystemNamespace,
Name: logpath(constants.KubeSystemNamespace),
},
},
}
}
func (c *EKSACollectorFactory) packagesLogCollectors() []*Collect {
return []*Collect{
{
Logs: &logs{
Namespace: constants.EksaPackagesName,
Name: logpath(constants.EksaPackagesName),
},
},
}
}
func (c *EKSACollectorFactory) managementClusterLogCollectors() []*Collect {
return []*Collect{
{
Logs: &logs{
Namespace: constants.CapiKubeadmBootstrapSystemNamespace,
Name: logpath(constants.CapiKubeadmBootstrapSystemNamespace),
},
},
{
Logs: &logs{
Namespace: constants.CapiKubeadmControlPlaneSystemNamespace,
Name: logpath(constants.CapiKubeadmControlPlaneSystemNamespace),
},
},
{
Logs: &logs{
Namespace: constants.CapiSystemNamespace,
Name: logpath(constants.CapiSystemNamespace),
},
},
{
Logs: &logs{
Namespace: constants.CapiWebhookSystemNamespace,
Name: logpath(constants.CapiWebhookSystemNamespace),
},
},
{
Logs: &logs{
Namespace: constants.CertManagerNamespace,
Name: logpath(constants.CertManagerNamespace),
},
},
{
Logs: &logs{
Namespace: constants.EtcdAdmBootstrapProviderSystemNamespace,
Name: logpath(constants.EtcdAdmBootstrapProviderSystemNamespace),
},
},
{
Logs: &logs{
Namespace: constants.EtcdAdmControllerSystemNamespace,
Name: logpath(constants.EtcdAdmControllerSystemNamespace),
},
},
}
}
func (c *EKSACollectorFactory) managementClusterCrdCollectors() []*Collect {
mgmtCrds := []string{
"clusters.anywhere.eks.amazonaws.com",
"bundles.anywhere.eks.amazonaws.com",
"clusters.cluster.x-k8s.io",
"machinedeployments.cluster.x-k8s.io",
"machines.cluster.x-k8s.io",
"machinehealthchecks.cluster.x-k8s.io",
"kubeadmcontrolplane.controlplane.cluster.x-k8s.io",
}
return c.generateCrdCollectors(mgmtCrds)
}
func (c *EKSACollectorFactory) snowCrdCollectors() []*Collect {
capasCrds := []string{
"awssnowclusters.infrastructure.cluster.x-k8s.io",
"awssnowmachines.infrastructure.cluster.x-k8s.io",
"awssnowmachinetemplates.infrastructure.cluster.x-k8s.io",
"snowdatacenterconfigs.anywhere.eks.amazonaws.com",
"snowmachineconfigs.anywhere.eks.amazonaws.com",
}
return c.generateCrdCollectors(capasCrds)
}
func (c *EKSACollectorFactory) tinkerbellCrdCollectors() []*Collect {
captCrds := []string{
"machines.bmc.tinkerbell.org",
"jobs.bmc.tinkerbell.org",
"tasks.bmc.tinkerbell.org",
"hardware.tinkerbell.org",
"templates.tinkerbell.org",
"tinkerbellclusters.infrastructure.cluster.x-k8s.io",
"tinkerbelldatacenterconfigs.anywhere.eks.amazonaws.com",
"tinkerbellmachineconfigs.anywhere.eks.amazonaws.com",
"tinkerbellmachines.infrastructure.cluster.x-k8s.io",
"tinkerbellmachinetemplates.infrastructure.cluster.x-k8s.io",
"tinkerbelltemplateconfigs.anywhere.eks.amazonaws.com",
"workflows.tinkerbell.org",
}
return c.generateCrdCollectors(captCrds)
}
func (c *EKSACollectorFactory) vsphereCrdCollectors() []*Collect {
capvCrds := []string{
"vsphereclusteridentities.infrastructure.cluster.x-k8s.io",
"vsphereclusters.infrastructure.cluster.x-k8s.io",
"vspheredatacenterconfigs.anywhere.eks.amazonaws.com",
"vspheremachineconfigs.anywhere.eks.amazonaws.com",
"vspheremachines.infrastructure.cluster.x-k8s.io",
"vspheremachinetemplates.infrastructure.cluster.x-k8s.io",
"vspherevms.infrastructure.cluster.x-k8s.io",
}
return c.generateCrdCollectors(capvCrds)
}
func (c *EKSACollectorFactory) cloudstackCrdCollectors() []*Collect {
crds := []string{
"cloudstackaffinitygroups.infrastructure.cluster.x-k8s.io",
"cloudstackclusters.infrastructure.cluster.x-k8s.io",
"cloudstackdatacenterconfigs.anywhere.eks.amazonaws.com",
"cloudstackisolatednetworks.infrastructure.cluster.x-k8s.io",
"cloudstackmachineconfigs.anywhere.eks.amazonaws.com",
"cloudstackmachines.infrastructure.cluster.x-k8s.io",
"cloudstackmachinestatecheckers.infrastructure.cluster.x-k8s.io",
"cloudstackmachinetemplates.infrastructure.cluster.x-k8s.io",
"cloudstackzones.infrastructure.cluster.x-k8s.io",
}
return c.generateCrdCollectors(crds)
}
func (c *EKSACollectorFactory) packagesCrdCollectors() []*Collect {
packageCrds := []string{
"packagebundlecontrollers.packages.eks.amazonaws.com",
"packagebundles.packages.eks.amazonaws.com",
"packagecontrollers.packages.eks.amazonaws.com",
"packages.packages.eks.amazonaws.com",
}
return c.generateCrdCollectors(packageCrds)
}
func (c *EKSACollectorFactory) nutanixCrdCollectors() []*Collect {
capxCrds := []string{
"nutanixclusters.infrastructure.cluster.x-k8s.io",
"nutanixdatacenterconfigs.anywhere.eks.amazonaws.com",
"nutanixmachineconfigs.anywhere.eks.amazonaws.com",
"nutanixmachines.infrastructure.cluster.x-k8s.io",
"nutanixmachinetemplates.infrastructure.cluster.x-k8s.io",
}
return c.generateCrdCollectors(capxCrds)
}
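// generateCrdCollectors builds one CRD collector per CRD name provided.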
func (c *EKSACollectorFactory) generateCrdCollectors(crds []string) []*Collect {
var crdCollectors []*Collect
for _, d := range crds {
crdCollectors = append(crdCollectors, c.crdCollector(d))
}
return crdCollectors
}
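// crdCollector runs a pod that dumps the given CRD across all namespaces with kubectl.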
func (c *EKSACollectorFactory) crdCollector(crdType string) *Collect {
command := []string{"kubectl"}
args := []string{"get", crdType, "-o", "json", "--all-namespaces"}
collectorPath := crdPath(crdType)
return &Collect{
RunPod: &runPod{
collectorMeta: collectorMeta{
CollectorName: crdType,
},
Name: collectorPath,
Namespace: constants.EksaDiagnosticsNamespace,
PodSpec: &v1.PodSpec{
Containers: []v1.Container{{
Name: collectorPath,
Image: c.DiagnosticCollectorImage,
Command: command,
Args: args,
}},
// Networking may not be working on the cluster, or the nodes may not be ready,
// so add tolerations and run the pod on host networking so it can still
// pull the resources from the cluster.
HostNetwork: true,
Tolerations: []v1.Toleration{{
Key: "node.kubernetes.io",
Value: "not-ready",
Effect: "NoSchedule",
}},
},
Timeout: "30s",
},
}
}
// apiServerCollectors collects connection info by running pods on an existing cluster.
func (c *EKSACollectorFactory) apiServerCollectors(controlPlaneIP string) []*Collect {
var collectors []*Collect
collectors = append(collectors, c.controlPlaneNetworkPathCollector(controlPlaneIP)...)
return collectors
}
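// controlPlaneNetworkPathCollector checks the network path to the control plane IP by probing its ports and pinging it.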
func (c *EKSACollectorFactory) controlPlaneNetworkPathCollector(controlPlaneIP string) []*Collect {
ports := []string{"6443", "22"}
var collectors []*Collect
collectors = append(collectors, c.hostPortCollector(ports, controlPlaneIP))
collectors = append(collectors, c.pingHostCollector(controlPlaneIP))
return collectors
}
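// hostPortCollector runs a pod that uses netcat to check TCP connectivity to the given ports on the host IP.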
func (c *EKSACollectorFactory) hostPortCollector(ports []string, hostIP string) *Collect {
apiServerPort := ports[0]
port := ports[1]
tempIPRequest := fmt.Sprintf("for port in %s %s; do nc -z -v -w5 %s $port; done", apiServerPort, port, hostIP)
argsIP := []string{tempIPRequest}
return &Collect{
RunPod: &runPod{
Name: "check-host-port",
Namespace: constants.EksaDiagnosticsNamespace,
PodSpec: &v1.PodSpec{
Containers: []v1.Container{{
Name: "check-host-port",
Image: c.DiagnosticCollectorImage,
Command: []string{"/bin/sh", "-c"},
Args: argsIP,
}},
},
Timeout: "30s",
},
}
}
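// pingHostCollector runs a pod that pings the host IP and records the exit code.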
func (c *EKSACollectorFactory) pingHostCollector(hostIP string) *Collect {
tempPingRequest := fmt.Sprintf("ping -w10 -c5 %s; echo exit code: $?", hostIP)
argsPing := []string{tempPingRequest}
return &Collect{
RunPod: &runPod{
Name: "ping-host-ip",
Namespace: constants.EksaDiagnosticsNamespace,
PodSpec: &v1.PodSpec{
Containers: []v1.Container{{
Name: "ping-host-ip",
Image: c.DiagnosticCollectorImage,
Command: []string{"/bin/sh", "-c"},
Args: argsPing,
}},
},
Timeout: "30s",
},
}
}
// vmsAccessCollector connects to the API server first, then collects the vsphere-cloud-controller-manager logs
// on the control plane node.
func (c *EKSACollectorFactory) vmsAccessCollector(controlPlaneConfiguration v1alpha1.ControlPlaneConfiguration) *Collect {
controlPlaneEndpointHost := controlPlaneConfiguration.Endpoint.Host
taints := controlPlaneConfiguration.Taints
tolerations := makeTolerations(taints)
makeConnection := fmt.Sprintf("curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \"Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" https://%s:6443/", controlPlaneEndpointHost)
getLogs := "kubectl logs -n kube-system -l k8s-app=vsphere-cloud-controller-manager"
args := []string{fmt.Sprintf("%s && %s", makeConnection, getLogs)}
return &Collect{
RunPod: &runPod{
Name: "check-cloud-controller",
Namespace: constants.EksaDiagnosticsNamespace,
PodSpec: &v1.PodSpec{
Containers: []v1.Container{{
Name: "check-cloud-controller",
Image: c.DiagnosticCollectorImage,
Command: []string{"/bin/sh", "-c"},
Args: args,
}},
ServiceAccountName: "default",
HostNetwork: true,
Tolerations: tolerations,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{{
Key: "node-role.kubernetes.io/control-plane",
Operator: "Exists",
}},
},
}, {
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{{
Key: "node-role.kubernetes.io/master",
Operator: "Exists",
}},
},
},
},
},
},
},
Timeout: "20s",
},
}
}
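// makeTolerations builds the tolerations for the diagnostic pod: it always tolerates the uninitialized and not-ready taints, and additionally tolerates either the default control plane taint or any taints configured on the control plane.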
func makeTolerations(taints []v1.Taint) []v1.Toleration {
tolerations := []v1.Toleration{
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: "NoSchedule",
},
{
Key: "node.kubernetes.io/not-ready",
Effect: "NoSchedule",
},
}
if taints == nil {
toleration := v1.Toleration{
Effect: "NoSchedule",
Key: "node-role.kubernetes.io/master",
}
tolerations = append(tolerations, toleration)
} else {
for _, taint := range taints {
var toleration v1.Toleration
if taint.Key != "" {
toleration.Key = taint.Key
}
if taint.Value != "" {
toleration.Value = taint.Value
}
if taint.Effect != "" {
toleration.Effect = taint.Effect
}
tolerations = append(tolerations, toleration)
}
}
return tolerations
}
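// logpath returns the bundle path used to store logs for the given namespace.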
func logpath(namespace string) string {
return fmt.Sprintf("logs/%s", namespace)
}
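// hostlogPath returns the bundle path used to store host logs of the given type.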
func hostlogPath(logType string) string {
return fmt.Sprintf("hostLogs/%s", logType)
}
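// crdPath returns the bundle path used to store the contents of the given CRD.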
func crdPath(crdType string) string {
return fmt.Sprintf("crds/%s", crdType)
}
| 635 |
eks-anywhere | aws | Go | package diagnostics_test
import (
"fmt"
"path/filepath"
"testing"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/diagnostics"
"github.com/aws/eks-anywhere/pkg/filewriter"
)
func TestFileCollectors(t *testing.T) {
g := NewGomegaWithT(t)
factory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
w, err := filewriter.NewWriter(t.TempDir())
g.Expect(err).To(BeNil())
logOut, err := w.Write("test.log", []byte("test content"))
g.Expect(err).To(BeNil())
g.Expect(logOut).To(BeAnExistingFile())
collectors := factory.FileCollectors([]string{logOut})
g.Expect(collectors).To(HaveLen(1), "FileCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Data.Data).To(Equal("test content"))
g.Expect(collectors[0].Data.Name).To(Equal(filepath.Base(logOut)))
collectors = factory.FileCollectors([]string{"does-not-exist.log"})
g.Expect(collectors).To(HaveLen(1), "FileCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Data.Data).To(ContainSubstring("Failed to retrieve file does-not-exist.log for collection"))
}
func TestVsphereDataCenterConfigCollectors(t *testing.T) {
g := NewGomegaWithT(t)
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.ClusterSpec{
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
Endpoint: &eksav1alpha1.Endpoint{
Host: "1.1.1.1",
},
Taints: []v1.Taint{
{
Key: "test-key",
Value: "test-value",
Effect: "NoSchedule",
},
},
},
DatacenterRef: eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereDatacenterKind,
Name: "testRef",
},
ExternalEtcdConfiguration: &eksav1alpha1.ExternalEtcdConfiguration{
Count: 3,
MachineGroupRef: &eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereMachineConfigKind,
Name: "testRef",
},
},
},
Status: eksav1alpha1.ClusterStatus{},
}
})
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.VSphereDatacenterKind}
factory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
collectors := factory.DataCenterConfigCollectors(datacenter, spec)
g.Expect(collectors).To(HaveLen(11), "DataCenterConfigCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Logs.Namespace).To(Equal(constants.CapvSystemNamespace))
g.Expect(collectors[0].Logs.Name).To(Equal(fmt.Sprintf("logs/%s", constants.CapvSystemNamespace)))
for _, collector := range collectors[1:8] {
g.Expect(collector.RunPod.PodSpec.Containers[0].Command).To(Equal([]string{"kubectl"}))
g.Expect(collector.RunPod.Namespace).To(Equal("eksa-diagnostics"))
}
g.Expect(collectors[8].RunPod.PodSpec.Containers[0].Name).To(Equal("check-host-port"))
g.Expect(collectors[9].RunPod.PodSpec.Containers[0].Name).To(Equal("ping-host-ip"))
g.Expect(collectors[10].RunPod.PodSpec.Containers[0].Name).To(Equal("check-cloud-controller"))
}
func TestCloudStackDataCenterConfigCollectors(t *testing.T) {
g := NewGomegaWithT(t)
spec := test.NewClusterSpec(func(s *cluster.Spec) {})
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.CloudStackDatacenterKind}
factory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
collectors := factory.DataCenterConfigCollectors(datacenter, spec)
g.Expect(collectors).To(HaveLen(10), "DataCenterConfigCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Logs.Namespace).To(Equal(constants.CapcSystemNamespace))
g.Expect(collectors[0].Logs.Name).To(Equal(fmt.Sprintf("logs/%s", constants.CapcSystemNamespace)))
for _, collector := range collectors[1:] {
g.Expect([]string{"kubectl"}).To(Equal(collector.RunPod.PodSpec.Containers[0].Command))
g.Expect("eksa-diagnostics").To(Equal(collector.RunPod.Namespace))
}
}
func TestTinkerbellDataCenterConfigCollectors(t *testing.T) {
g := NewGomegaWithT(t)
spec := test.NewClusterSpec(func(s *cluster.Spec) {})
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.TinkerbellDatacenterKind}
factory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
collectors := factory.DataCenterConfigCollectors(datacenter, spec)
g.Expect(collectors).To(HaveLen(13), "DataCenterConfigCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Logs.Namespace).To(Equal(constants.CaptSystemNamespace))
g.Expect(collectors[0].Logs.Name).To(Equal(fmt.Sprintf("logs/%s", constants.CaptSystemNamespace)))
for _, collector := range collectors[1:] {
g.Expect([]string{"kubectl"}).To(Equal(collector.RunPod.PodSpec.Containers[0].Command))
g.Expect("eksa-diagnostics").To(Equal(collector.RunPod.Namespace))
}
}
func TestSnowCollectors(t *testing.T) {
g := NewGomegaWithT(t)
spec := test.NewClusterSpec(func(s *cluster.Spec) {})
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.SnowDatacenterKind}
factory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
collectors := factory.DataCenterConfigCollectors(datacenter, spec)
g.Expect(collectors).To(HaveLen(6), "DataCenterConfigCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Logs.Namespace).To(Equal(constants.CapasSystemNamespace))
g.Expect(collectors[0].Logs.Name).To(Equal(fmt.Sprintf("logs/%s", constants.CapasSystemNamespace)))
for _, collector := range collectors[1:] {
g.Expect([]string{"kubectl"}).To(Equal(collector.RunPod.PodSpec.Containers[0].Command))
g.Expect("eksa-diagnostics").To(Equal(collector.RunPod.Namespace))
}
}
func TestNutanixCollectors(t *testing.T) {
g := NewGomegaWithT(t)
spec := test.NewClusterSpec(func(s *cluster.Spec) {})
datacenter := eksav1alpha1.Ref{Kind: eksav1alpha1.NutanixDatacenterKind}
factory := diagnostics.NewDefaultCollectorFactory(test.NewFileReader())
collectors := factory.DataCenterConfigCollectors(datacenter, spec)
g.Expect(collectors).To(HaveLen(6), "DataCenterConfigCollectors() mismatch between number of desired collectors and actual")
g.Expect(collectors[0].Logs.Namespace).To(Equal(constants.CapxSystemNamespace))
g.Expect(collectors[0].Logs.Name).To(Equal(fmt.Sprintf("logs/%s", constants.CapxSystemNamespace)))
for _, collector := range collectors[1:] {
g.Expect([]string{"kubectl"}).To(Equal(collector.RunPod.PodSpec.Containers[0].Command))
g.Expect("eksa-diagnostics").To(Equal(collector.RunPod.Namespace))
}
}
| 149 |
eks-anywhere | aws | Go | package diagnostics
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
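// Collect represents a single collector entry in the generated support bundle spec; the constructors in this package set exactly one of its fields.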
type Collect struct {
ClusterInfo *clusterInfo `json:"clusterInfo,omitempty"`
ClusterResources *clusterResources `json:"clusterResources,omitempty"`
Secret *secret `json:"secret,omitempty"`
Logs *logs `json:"logs,omitempty"`
Data *data `json:"data,omitempty"`
CopyFromHost *copyFromHost `json:"copyFromHost,omitempty"`
Exec *exec `json:"exec,omitempty"`
RunPod *runPod `json:"runPod,omitempty"`
}
type clusterResources struct {
collectorMeta `json:",inline"`
}
type secret struct {
collectorMeta `json:",inline"`
SecretName string `json:"name"`
Namespace string `json:"namespace,omitempty"`
Key string `json:"key,omitempty"`
IncludeValue bool `json:"includeValue,omitempty"`
}
type clusterInfo struct {
collectorMeta `json:",inline"`
}
type logLimits struct {
MaxAge string `json:"maxAge,omitempty"`
MaxLines int64 `json:"maxLines,omitempty"`
SinceTime metav1.Time
}
type logs struct {
collectorMeta `json:",inline" yaml:",inline"`
Name string `json:"name,omitempty"`
Selector []string `json:"selector"`
Namespace string `json:"namespace,omitempty"`
ContainerNames []string `json:"containerNames,omitempty"`
Limits *logLimits `json:"limits,omitempty"`
}
type data struct {
Name string `json:"name,omitempty"`
Data string `json:"data,omitempty"`
}
type copyFromHost struct {
collectorMeta `json:",inline"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace"`
Image string `json:"image"`
ImagePullPolicy string `json:"imagePullPolicy,omitempty"`
ImagePullSecret *imagePullSecrets `json:"imagePullSecret,omitempty"`
Timeout string `json:"timeout,omitempty"`
HostPath string `json:"hostPath"`
ExtractArchive bool `json:"extractArchive,omitempty"`
}
type exec struct {
collectorMeta `json:",inline"`
Name string `json:"name,omitempty"`
Selector []string `json:"selector"`
Namespace string `json:"namespace"`
ContainerName string `json:"containerName,omitempty"`
Command []string `json:"command,omitempty"`
Args []string `json:"args,omitempty"`
Timeout string `json:"timeout,omitempty"`
}
type imagePullSecrets struct {
Name string `json:"name,omitempty"`
Data map[string]string `json:"data,omitempty"`
SecretType string `json:"type,omitempty"`
}
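// collectorMeta holds fields shared by all collector types.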
type collectorMeta struct {
CollectorName string `json:"collectorName,omitempty"`
Exclude bool `json:"exclude,omitempty"`
}
type runPod struct {
collectorMeta `json:",inline"`
Name string `json:"name,omitempty"`
Namespace string `json:"namespace"`
PodSpec *v1.PodSpec `json:"podSpec,omitempty"`
Timeout string `json:"timeout,omitempty"`
imagePullSecrets `json:",inline"`
}
| 97 |
eks-anywhere | aws | Go | package diagnostics_test
import (
"bytes"
"context"
"testing"
"time"
"github.com/golang/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/diagnostics"
supportMocks "github.com/aws/eks-anywhere/pkg/diagnostics/interfaces/mocks"
"github.com/aws/eks-anywhere/pkg/executables"
mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks"
"github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/providers"
providerMocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
)
func TestParseTimeOptions(t *testing.T) {
type args struct {
since string
sinceTime string
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "Without time options",
args: args{
since: "",
sinceTime: "",
},
wantErr: false,
},
{
name: "Good since options",
args: args{
since: "8h",
sinceTime: "",
},
wantErr: false,
},
{
name: "Good since time options",
args: args{
since: "",
sinceTime: "2021-06-28T15:04:05Z",
},
wantErr: false,
},
{
name: "Duplicate time options",
args: args{
since: "8m",
sinceTime: "2021-06-28T15:04:05Z",
},
wantErr: true,
},
{
name: "Wrong since time options",
args: args{
since: "",
sinceTime: "2021-06-28T15:04:05Z07:00",
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := diagnostics.ParseTimeOptions(tt.args.since, tt.args.sinceTime)
if (err != nil) != tt.wantErr {
t.Errorf("ParseTimeOptions() error = %v, wantErr %v", err, tt.wantErr)
return
}
})
}
}
func TestGenerateBundleConfigWithExternalEtcd(t *testing.T) {
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.ClusterSpec{
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
Endpoint: &eksav1alpha1.Endpoint{
Host: "1.1.1.1",
},
},
DatacenterRef: eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereDatacenterKind,
Name: "testRef",
},
ExternalEtcdConfiguration: &eksav1alpha1.ExternalEtcdConfiguration{
Count: 3,
MachineGroupRef: &eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereMachineConfigKind,
Name: "testRef",
},
},
},
Status: eksav1alpha1.ClusterStatus{},
}
})
t.Run(t.Name(), func(t *testing.T) {
p := givenProvider(t)
p.EXPECT().MachineConfigs(spec).Return(machineConfigs())
a := givenMockAnalyzerFactory(t)
a.EXPECT().EksaExternalEtcdAnalyzers().Return(nil)
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
a.EXPECT().PackageAnalyzers().Return(nil)
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().EksaHostCollectors(gomock.Any()).Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
c.EXPECT().PackagesCollectors().Return(nil)
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
w := givenWriter(t)
w.EXPECT().Write(gomock.Any(), gomock.Any())
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
}
f := diagnostics.NewFactory(opts)
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
})
}
func TestGenerateBundleConfigWithOidc(t *testing.T) {
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.ClusterSpec{
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
Endpoint: &eksav1alpha1.Endpoint{
Host: "1.1.1.1",
},
},
DatacenterRef: eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereDatacenterKind,
Name: "testRef",
},
},
Status: eksav1alpha1.ClusterStatus{},
}
s.OIDCConfig = &eksav1alpha1.OIDCConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.OIDCConfigSpec{},
Status: eksav1alpha1.OIDCConfigStatus{},
}
})
t.Run(t.Name(), func(t *testing.T) {
p := givenProvider(t)
p.EXPECT().MachineConfigs(spec).Return(machineConfigs())
a := givenMockAnalyzerFactory(t)
a.EXPECT().EksaOidcAnalyzers().Return(nil)
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
a.EXPECT().PackageAnalyzers().Return(nil)
w := givenWriter(t)
w.EXPECT().Write(gomock.Any(), gomock.Any())
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().EksaHostCollectors(gomock.Any()).Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
c.EXPECT().PackagesCollectors().Return(nil)
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
}
f := diagnostics.NewFactory(opts)
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
})
}
func TestGenerateBundleConfigWithGitOps(t *testing.T) {
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.ClusterSpec{
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
Endpoint: &eksav1alpha1.Endpoint{
Host: "1.1.1.1",
},
},
DatacenterRef: eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereDatacenterKind,
Name: "testRef",
},
},
Status: eksav1alpha1.ClusterStatus{},
}
s.GitOpsConfig = &eksav1alpha1.GitOpsConfig{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.GitOpsConfigSpec{},
Status: eksav1alpha1.GitOpsConfigStatus{},
}
})
t.Run(t.Name(), func(t *testing.T) {
p := givenProvider(t)
p.EXPECT().MachineConfigs(spec).Return(machineConfigs())
a := givenMockAnalyzerFactory(t)
a.EXPECT().EksaGitopsAnalyzers().Return(nil)
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
a.EXPECT().PackageAnalyzers().Return(nil)
w := givenWriter(t)
w.EXPECT().Write(gomock.Any(), gomock.Any())
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().EksaHostCollectors(gomock.Any()).Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
c.EXPECT().PackagesCollectors().Return(nil)
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
}
f := diagnostics.NewFactory(opts)
_, _ = f.DiagnosticBundleWorkloadCluster(spec, p, "")
})
}
func TestGenerateDefaultBundle(t *testing.T) {
t.Run(t.Name(), func(t *testing.T) {
a := givenMockAnalyzerFactory(t)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
w := givenWriter(t)
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
}
f := diagnostics.NewFactory(opts)
_ = f.DiagnosticBundleDefault()
})
}
func TestBundleFromSpecComplete(t *testing.T) {
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{},
Spec: eksav1alpha1.ClusterSpec{
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
Endpoint: &eksav1alpha1.Endpoint{
Host: "1.1.1.1",
},
},
DatacenterRef: eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereDatacenterKind,
Name: "testRef",
},
ExternalEtcdConfiguration: &eksav1alpha1.ExternalEtcdConfiguration{
Count: 3,
MachineGroupRef: &eksav1alpha1.Ref{
Kind: eksav1alpha1.VSphereMachineConfigKind,
Name: "testRef",
},
},
},
Status: eksav1alpha1.ClusterStatus{},
}
})
t.Run(t.Name(), func(t *testing.T) {
ctx := context.Background()
kubeconfig := "testcluster.kubeconfig"
p := givenProvider(t)
p.EXPECT().MachineConfigs(spec).Return(machineConfigs())
a := givenMockAnalyzerFactory(t)
a.EXPECT().EksaExternalEtcdAnalyzers().Return(nil)
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
a.EXPECT().PackageAnalyzers().Return(nil)
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().EksaHostCollectors(gomock.Any()).Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
c.EXPECT().PackagesCollectors().Return(nil)
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
w := givenWriter(t)
w.EXPECT().Write(gomock.Any(), gomock.Any()).Times(2)
k, e := givenKubectl(t)
expectedParam := []string{"create", "namespace", constants.EksaDiagnosticsNamespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
expectedParam = []string{"delete", "namespace", constants.EksaDiagnosticsNamespace, "--kubeconfig", kubeconfig}
e.EXPECT().Execute(ctx, gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
expectedParam = []string{"apply", "-f", "-", "--kubeconfig", kubeconfig}
e.EXPECT().ExecuteWithStdin(ctx, gomock.Any(), gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
expectedParam = []string{"delete", "-f", "-", "--kubeconfig", kubeconfig}
e.EXPECT().ExecuteWithStdin(ctx, gomock.Any(), gomock.Eq(expectedParam)).Return(bytes.Buffer{}, nil)
returnAnalysis := []*executables.SupportBundleAnalysis{
{
Title: "itsATestYo",
IsPass: true,
IsFail: false,
IsWarn: false,
Message: "",
Uri: "",
},
}
tc := givenTroubleshootClient(t)
mockArchivePath := "/tmp/archive/path"
tc.EXPECT().Collect(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(mockArchivePath, nil)
tc.EXPECT().Analyze(ctx, gomock.Any(), mockArchivePath).Return(returnAnalysis, nil)
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
Kubectl: k,
Client: tc,
}
var sinceTimeValue *time.Time
sinceTimeValue, err := diagnostics.ParseTimeOptions("1h", "")
if err != nil {
t.Errorf("ParseTimeOptions() error = %v, wantErr nil", err)
return
}
f := diagnostics.NewFactory(opts)
b, _ := f.DiagnosticBundleWorkloadCluster(spec, p, kubeconfig)
err = b.CollectAndAnalyze(ctx, sinceTimeValue)
if err != nil {
t.Errorf("CollectAndAnalyze() error = %v, wantErr nil", err)
return
}
})
}
func TestGenerateCustomBundle(t *testing.T) {
t.Run(t.Name(), func(t *testing.T) {
f := diagnostics.NewFactory(getOpts(t))
_ = f.DiagnosticBundleCustom("", "")
})
}
func givenMockAnalyzerFactory(t *testing.T) *supportMocks.MockAnalyzerFactory {
ctrl := gomock.NewController(t)
return supportMocks.NewMockAnalyzerFactory(ctrl)
}
func givenMockCollectorsFactory(t *testing.T) *supportMocks.MockCollectorFactory {
ctrl := gomock.NewController(t)
return supportMocks.NewMockCollectorFactory(ctrl)
}
func getOpts(t *testing.T) diagnostics.EksaDiagnosticBundleFactoryOpts {
return diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: givenMockAnalyzerFactory(t),
CollectorFactory: givenMockCollectorsFactory(t),
}
}
func givenKubectl(t *testing.T) (*executables.Kubectl, *mockexecutables.MockExecutable) {
ctrl := gomock.NewController(t)
executable := mockexecutables.NewMockExecutable(ctrl)
return executables.NewKubectl(executable), executable
}
func givenTroubleshootClient(t *testing.T) *supportMocks.MockBundleClient {
ctrl := gomock.NewController(t)
return supportMocks.NewMockBundleClient(ctrl)
}
func givenWriter(t *testing.T) *mocks.MockFileWriter {
ctrl := gomock.NewController(t)
return mocks.NewMockFileWriter(ctrl)
}
func givenProvider(t *testing.T) *providerMocks.MockProvider {
ctrl := gomock.NewController(t)
return providerMocks.NewMockProvider(ctrl)
}
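// machineConfigs returns an empty machine config list for tests.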
func machineConfigs() []providers.MachineConfig {
var m []providers.MachineConfig
return m
}
func TestGenerateManagementClusterBundleVsphereProvider(t *testing.T) {
kubeconfig := "testcluster.kubeconfig"
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "bootstrap-cluster",
},
Spec: eksav1alpha1.ClusterSpec{
ControlPlaneConfiguration: eksav1alpha1.ControlPlaneConfiguration{
Endpoint: &eksav1alpha1.Endpoint{
Host: "1.1.1.1",
},
},
DatacenterRef: eksav1alpha1.Ref{
Kind: "VSphereDatacenterConfig",
},
},
Status: eksav1alpha1.ClusterStatus{},
}
})
t.Run(t.Name(), func(t *testing.T) {
a := givenMockAnalyzerFactory(t)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
w := givenWriter(t)
w.EXPECT().Write(gomock.Any(), gomock.Any()).Times(2)
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
}
f := diagnostics.NewFactory(opts)
bootstrapBundle, _ := f.DiagnosticBundleManagementCluster(spec, kubeconfig)
err := bootstrapBundle.WriteBundleConfig()
if err != nil {
t.Errorf("WriteBundleConfig() error = %v, wantErr nil", err)
return
}
})
}
func TestGenerateManagementClusterBundleDockerProvider(t *testing.T) {
kubeconfig := "testcluster.kubeconfig"
spec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &eksav1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "bootstrap-cluster",
},
Spec: eksav1alpha1.ClusterSpec{
DatacenterRef: eksav1alpha1.Ref{
Kind: "DockerDatacenterConfig",
},
},
Status: eksav1alpha1.ClusterStatus{},
}
})
t.Run(t.Name(), func(t *testing.T) {
a := givenMockAnalyzerFactory(t)
a.EXPECT().DefaultAnalyzers().Return(nil)
a.EXPECT().ManagementClusterAnalyzers().Return(nil)
a.EXPECT().DataCenterConfigAnalyzers(spec.Cluster.Spec.DatacenterRef).Return(nil)
a.EXPECT().EksaLogTextAnalyzers(gomock.Any()).Return(nil)
c := givenMockCollectorsFactory(t)
c.EXPECT().DefaultCollectors().Return(nil)
c.EXPECT().ManagementClusterCollectors().Return(nil)
c.EXPECT().DataCenterConfigCollectors(spec.Cluster.Spec.DatacenterRef, spec).Return(nil)
c.EXPECT().FileCollectors(gomock.Any()).Return(nil)
w := givenWriter(t)
w.EXPECT().Write(gomock.Any(), gomock.Any()).Times(2)
opts := diagnostics.EksaDiagnosticBundleFactoryOpts{
AnalyzerFactory: a,
CollectorFactory: c,
Writer: w,
}
f := diagnostics.NewFactory(opts)
bootstrapBundle, _ := f.DiagnosticBundleManagementCluster(spec, kubeconfig)
err := bootstrapBundle.WriteBundleConfig()
if err != nil {
t.Errorf("WriteBundleConfig() error = %v, wantErr nil", err)
return
}
})
}
| 549 |