Dataset schema: repo_name (string, 1 to 52 chars), repo_creator (string, 6 distinct values), programming_language (string, 4 distinct values), code (string, 0 to 9.68M chars), num_lines (int64, 1 to 234k).
aws/eks-anywhere · Go · 32 lines

```go
package cloudstack

import (
    "context"

    "github.com/pkg/errors"
    "k8s.io/apimachinery/pkg/api/equality"
    cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2"

    "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)

// GetMachineTemplate gets a CloudStackMachineTemplate object using the provided client.
// If the object doesn't exist, it returns a NotFound error.
func GetMachineTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*cloudstackv1.CloudStackMachineTemplate, error) {
    m := &cloudstackv1.CloudStackMachineTemplate{}
    if err := client.Get(ctx, name, namespace, m); err != nil {
        return nil, errors.Wrap(err, "reading cloudstackMachineTemplate")
    }

    return m, nil
}

// machineTemplateEqual returns a boolean indicating whether the provided CloudStackMachineTemplates are equal.
func machineTemplateEqual(new, old *cloudstackv1.CloudStackMachineTemplate) bool {
    // Compare new -> old and old -> new because DeepDerivative ignores fields in the first param
    // that are default values. This is important for cases where an optional field is removed
    // from the spec.
    return equality.Semantic.DeepDerivative(new.Spec, old.Spec) &&
        equality.Semantic.DeepDerivative(old.Spec, new.Spec)
}
```
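The two-way `DeepDerivative` comparison above is the subtle part of this file, so here is a small, self-contained sketch of why one direction alone misses removed optional fields. The `spec` type is a simplified stand-in, not the real CAPC API:

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/equality"
)

// spec is a hypothetical stand-in for a machine template spec with one
// optional field.
type spec struct {
    Offering string
    Details  map[string]string // optional
}

func main() {
    oldSpec := spec{Offering: "m4-large", Details: map[string]string{"key": "val"}}
    newSpec := spec{Offering: "m4-large"} // optional Details removed

    // One-way: newSpec's zero-valued Details field is treated as "unset" and
    // skipped, so the removal goes unnoticed and this prints true.
    fmt.Println(equality.Semantic.DeepDerivative(newSpec, oldSpec))

    // Two-way, as machineTemplateEqual does: the reverse check sees oldSpec's
    // non-zero Details missing from newSpec and prints false.
    fmt.Println(equality.Semantic.DeepDerivative(newSpec, oldSpec) &&
        equality.Semantic.DeepDerivative(oldSpec, newSpec))
}
```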
aws/eks-anywhere · Go · 46 lines

```go
package cloudstack

import (
    "fmt"
    "net"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/networkutils"
)

func etcdMachineConfig(s *cluster.Spec) *anywherev1.CloudStackMachineConfig {
    if s.Cluster.Spec.ExternalEtcdConfiguration == nil || s.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef == nil {
        return nil
    }
    return s.CloudStackMachineConfigs[s.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
}

func controlPlaneMachineConfig(s *cluster.Spec) *anywherev1.CloudStackMachineConfig {
    return s.CloudStackMachineConfigs[s.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
}

func workerMachineConfig(s *cluster.Spec, workers anywherev1.WorkerNodeGroupConfiguration) *anywherev1.CloudStackMachineConfig {
    return s.CloudStackMachineConfigs[workers.MachineGroupRef.Name]
}

func controlPlaneEndpointHost(clusterSpec *cluster.Spec) (string, error) {
    host, port, err := getValidControlPlaneHostPort(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host)
    if err != nil {
        return "", err
    }

    return net.JoinHostPort(host, port), nil
}

func getValidControlPlaneHostPort(pHost string) (string, string, error) {
    host, port, err := anywherev1.GetControlPlaneHostPort(pHost, anywherev1.ControlEndpointDefaultPort)
    if err != nil {
        return "", "", err
    }
    if !networkutils.IsPortValid(port) {
        return "", "", fmt.Errorf("host %s has an invalid port", pHost)
    }
    return host, port, nil
}
```
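A minimal sketch of the split-with-default behavior these helpers rely on. It mirrors what `anywherev1.GetControlPlaneHostPort` appears to do from the call sites and test expectations in this dump, and assumes 6443 (the conventional kube-apiserver port) as the default; it is not that function's actual implementation:

```go
package main

import (
    "fmt"
    "net"
    "strings"
)

// defaultPort is an assumption standing in for anywherev1.ControlEndpointDefaultPort.
const defaultPort = "6443"

// splitWithDefault accepts "host" or "host:port" and applies the default port
// when none is given. Simplified: bare IPv6 addresses contain colons and would
// need extra handling.
func splitWithDefault(endpoint string) (host, port string, err error) {
    if !strings.Contains(endpoint, ":") {
        return endpoint, defaultPort, nil
    }
    return net.SplitHostPort(endpoint)
}

func main() {
    host, port, _ := splitWithDefault("10.0.0.10")
    fmt.Println(net.JoinHostPort(host, port)) // 10.0.0.10:6443

    // Malformed input surfaces the parse error, matching the
    // "too many colons in address" messages asserted in the tests below.
    _, _, err := splitWithDefault("1.1.1.1::")
    fmt.Println(err)
}
```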
aws/eks-anywhere · Go · 397 lines

```go
package cloudstack

import (
    "fmt"
    "net"

    "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/clusterapi"
    "github.com/aws/eks-anywhere/pkg/constants"
    "github.com/aws/eks-anywhere/pkg/crypto"
    "github.com/aws/eks-anywhere/pkg/features"
    "github.com/aws/eks-anywhere/pkg/providers"
    "github.com/aws/eks-anywhere/pkg/providers/common"
    "github.com/aws/eks-anywhere/pkg/registrymirror"
    "github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
    "github.com/aws/eks-anywhere/pkg/templater"
    "github.com/aws/eks-anywhere/pkg/types"
)

// TemplateBuilder is responsible for building the CAPI templates.
type TemplateBuilder struct {
    now types.NowFunc
}

// NewTemplateBuilder creates a new TemplateBuilder.
func NewTemplateBuilder(now types.NowFunc) *TemplateBuilder {
    return &TemplateBuilder{
        now: now,
    }
}

// GenerateCAPISpecControlPlane builds the CAPI controlplane template containing the CAPI objects for the control plane configuration defined in the cluster.Spec.
func (cs *TemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) {
    if clusterSpec.CloudStackDatacenter == nil {
        return nil, fmt.Errorf("provided clusterSpec CloudStackDatacenter is nil. Unable to generate CAPI spec control plane")
    }
    var etcdMachineSpec v1alpha1.CloudStackMachineConfigSpec
    if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
        etcdMachineSpec = etcdMachineConfig(clusterSpec).Spec
    }
    values, err := buildTemplateMapCP(clusterSpec)
    if err != nil {
        return nil, fmt.Errorf("error building template map from CP %v", err)
    }
    for _, buildOption := range buildOptions {
        buildOption(values)
    }

    bytes, err := buildControlPlaneTemplate(&controlPlaneMachineConfig(clusterSpec).Spec, values)
    if err != nil {
        return nil, err
    }

    if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
        etcdMachineTemplateBytes, err := buildEtcdTemplate(&etcdMachineSpec, values)
        if err != nil {
            return nil, fmt.Errorf("marshalling etcd machine template to byte array: %v", err)
        }
        bytes = append(bytes, etcdMachineTemplateBytes...)
    }

    return bytes, nil
}

// GenerateCAPISpecWorkers builds the CAPI worker template containing the CAPI objects for the worker node groups configuration defined in the cluster.Spec.
func (cs *TemplateBuilder) GenerateCAPISpecWorkers(clusterSpec *cluster.Spec, workloadTemplateNames, kubeadmconfigTemplateNames map[string]string) (content []byte, err error) {
    workerSpecs := make([][]byte, 0, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
    for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
        values, err := buildTemplateMapMD(clusterSpec, workerNodeGroupConfiguration)
        if err != nil {
            return nil, fmt.Errorf("building template map for MD %v", err)
        }
        values["workloadTemplateName"] = workloadTemplateNames[workerNodeGroupConfiguration.Name]
        values["workloadkubeadmconfigTemplateName"] = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name]
        values["autoscalingConfig"] = workerNodeGroupConfiguration.AutoScalingConfiguration

        // TODO: Extract out worker MachineDeployments from templates to use apibuilder instead
        bytes, err := templater.Execute(defaultClusterConfigMD, values)
        if err != nil {
            return nil, err
        }
        workerSpecs = append(workerSpecs, bytes)

        workerMachineTemplateName := workloadTemplateNames[workerNodeGroupConfiguration.Name]
        workerMachineTemplate := MachineTemplate(workerMachineTemplateName, &workerMachineConfig(clusterSpec, workerNodeGroupConfiguration).Spec)
        workerMachineTemplateBytes, err := templater.ObjectsToYaml(workerMachineTemplate)
        if err != nil {
            return nil, fmt.Errorf("marshalling worker machine template to byte array: %v", err)
        }
        workerSpecs = append(workerSpecs, workerMachineTemplateBytes)
    }

    return templater.AppendYamlResources(workerSpecs...), nil
}

// nolint:gocyclo
func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, error) {
    datacenterConfigSpec := clusterSpec.CloudStackDatacenter.Spec
    bundle := clusterSpec.VersionsBundle
    format := "cloud-config"

    host, port, err := getValidControlPlaneHostPort(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host)
    if err != nil {
        return nil, err
    }

    etcdExtraArgs := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs()
    sharedExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs()
    kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
        Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)).
        Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration))
    apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig).
        Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)).
        Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig)).
        Append(sharedExtraArgs)
    controllerManagerExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
        Append(clusterapi.NodeCIDRMaskExtraArgs(&clusterSpec.Cluster.Spec.ClusterNetwork))

    controlPlaneMachineSpec := controlPlaneMachineConfig(clusterSpec).Spec
    controlPlaneSSHKey, err := common.StripSshAuthorizedKeyComment(controlPlaneMachineSpec.Users[0].SshAuthorizedKeys[0])
    if err != nil {
        return nil, fmt.Errorf("formatting ssh key for cloudstack control plane template: %v", err)
    }

    var etcdMachineSpec v1alpha1.CloudStackMachineConfigSpec
    var etcdSSHAuthorizedKey string
    if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
        etcdMachineSpec = etcdMachineConfig(clusterSpec).Spec
        etcdSSHAuthorizedKey, err = common.StripSshAuthorizedKeyComment(etcdMachineSpec.Users[0].SshAuthorizedKeys[0])
        if err != nil {
            return nil, fmt.Errorf("formatting ssh key for cloudstack etcd template: %v", err)
        }
    }

    values := map[string]interface{}{
        "clusterName":                clusterSpec.Cluster.Name,
        "controlPlaneEndpointHost":   host,
        "controlPlaneEndpointPort":   port,
        "controlPlaneReplicas":       clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count,
        "kubernetesRepository":       bundle.KubeDistro.Kubernetes.Repository,
        "kubernetesVersion":          bundle.KubeDistro.Kubernetes.Tag,
        "etcdRepository":             bundle.KubeDistro.Etcd.Repository,
        "etcdImageTag":               bundle.KubeDistro.Etcd.Tag,
        "corednsRepository":          bundle.KubeDistro.CoreDNS.Repository,
        "corednsVersion":             bundle.KubeDistro.CoreDNS.Tag,
        "nodeDriverRegistrarImage":   bundle.KubeDistro.NodeDriverRegistrar.VersionedImage(),
        "livenessProbeImage":         bundle.KubeDistro.LivenessProbe.VersionedImage(),
        "externalAttacherImage":      bundle.KubeDistro.ExternalAttacher.VersionedImage(),
        "externalProvisionerImage":   bundle.KubeDistro.ExternalProvisioner.VersionedImage(),
        "managerImage":               bundle.CloudStack.ClusterAPIController.VersionedImage(),
        "kubeRbacProxyImage":         bundle.CloudStack.KubeRbacProxy.VersionedImage(),
        "kubeVipImage":               bundle.CloudStack.KubeVip.VersionedImage(),
        "cloudstackKubeVip":          !features.IsActive(features.CloudStackKubeVipDisabled()),
        "cloudstackAvailabilityZones": datacenterConfigSpec.AvailabilityZones,
        "cloudstackAnnotationSuffix": constants.CloudstackAnnotationSuffix,
        "cloudstackControlPlaneComputeOfferingId":    controlPlaneMachineSpec.ComputeOffering.Id,
        "cloudstackControlPlaneComputeOfferingName":  controlPlaneMachineSpec.ComputeOffering.Name,
        "cloudstackControlPlaneTemplateOfferingId":   controlPlaneMachineSpec.Template.Id,
        "cloudstackControlPlaneTemplateOfferingName": controlPlaneMachineSpec.Template.Name,
        "cloudstackControlPlaneCustomDetails":        controlPlaneMachineSpec.UserCustomDetails,
        "cloudstackControlPlaneSymlinks":             controlPlaneMachineSpec.Symlinks,
        "cloudstackControlPlaneAffinity":             controlPlaneMachineSpec.Affinity,
        "cloudstackControlPlaneAffinityGroupIds":     controlPlaneMachineSpec.AffinityGroupIds,
        "cloudstackEtcdComputeOfferingId":            etcdMachineSpec.ComputeOffering.Id,
        "cloudstackEtcdComputeOfferingName":          etcdMachineSpec.ComputeOffering.Name,
        "cloudstackEtcdTemplateOfferingId":           etcdMachineSpec.Template.Id,
        "cloudstackEtcdTemplateOfferingName":         etcdMachineSpec.Template.Name,
        "cloudstackEtcdCustomDetails":                etcdMachineSpec.UserCustomDetails,
        "cloudstackEtcdSymlinks":                     etcdMachineSpec.Symlinks,
        "cloudstackEtcdAffinity":                     etcdMachineSpec.Affinity,
        "cloudstackEtcdAffinityGroupIds":             etcdMachineSpec.AffinityGroupIds,
        "controlPlaneSshUsername":                    controlPlaneMachineSpec.Users[0].Name,
        "cloudstackControlPlaneSshAuthorizedKey":     controlPlaneSSHKey,
        "cloudstackEtcdSshAuthorizedKey":             etcdSSHAuthorizedKey,
        "podCidrs":                                   clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks,
        "serviceCidrs":                               clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks,
        "apiserverExtraArgs":                         apiServerExtraArgs.ToPartialYaml(),
        "kubeletExtraArgs":                           kubeletExtraArgs.ToPartialYaml(),
        "etcdExtraArgs":                              etcdExtraArgs.ToPartialYaml(),
        "etcdCipherSuites":                           crypto.SecureCipherSuitesString(),
        "controllermanagerExtraArgs":                 controllerManagerExtraArgs.ToPartialYaml(),
        "schedulerExtraArgs":                         sharedExtraArgs.ToPartialYaml(),
        "format":                                     format,
        "externalEtcdVersion":                        bundle.KubeDistro.EtcdVersion,
        "etcdImage":                                  bundle.KubeDistro.EtcdImage.VersionedImage(),
        "eksaSystemNamespace":                        constants.EksaSystemNamespace,
    }

    auditPolicy, err := common.GetAuditPolicy(clusterSpec.Cluster.Spec.KubernetesVersion)
    if err != nil {
        return nil, err
    }
    values["auditPolicy"] = auditPolicy

    fillDiskOffering(values, controlPlaneMachineSpec.DiskOffering, "ControlPlane")
    fillDiskOffering(values, etcdMachineSpec.DiskOffering, "Etcd")

    values["cloudstackControlPlaneAnnotations"] = values["cloudstackControlPlaneDiskOfferingProvided"].(bool) || len(controlPlaneMachineSpec.Symlinks) > 0
    values["cloudstackEtcdAnnotations"] = values["cloudstackEtcdDiskOfferingProvided"].(bool) || len(etcdMachineSpec.Symlinks) > 0

    if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
        registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
        values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
        values["mirrorBase"] = registryMirror.BaseRegistry
        values["insecureSkip"] = registryMirror.InsecureSkipVerify
        values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror())
        if len(registryMirror.CACertContent) > 0 {
            values["registryCACert"] = registryMirror.CACertContent
        }
    }

    if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
        fillProxyConfigurations(values, clusterSpec, net.JoinHostPort(host, port))
    }

    if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
        values["externalEtcd"] = true
        values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count
        values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name
    }

    if len(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints) > 0 {
        values["controlPlaneTaints"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints
    }

    if clusterSpec.AWSIamConfig != nil {
        values["awsIamAuth"] = true
    }

    if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
        values["upgradeRolloutStrategy"] = true
        values["maxSurge"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
    }

    return values, nil
}

func buildControlPlaneTemplate(machineSpec *v1alpha1.CloudStackMachineConfigSpec, values map[string]interface{}) (content []byte, err error) {
    templateName, ok := values[cpTemplateNameKey]
    if !ok {
        return nil, fmt.Errorf("unable to determine control plane template name")
    }
    machineTemplate := MachineTemplate(fmt.Sprintf("%s", templateName), machineSpec)
    templateBytes, err := templater.ObjectsToYaml(machineTemplate)
    if err != nil {
        return nil, fmt.Errorf("marshalling control plane machine template to byte array: %v", err)
    }
    bytes, err := templater.Execute(defaultCAPIConfigCP, values)
    if err != nil {
        return nil, err
    }
    bytes = append(bytes, templateBytes...)

    return bytes, nil
}

func buildEtcdTemplate(machineSpec *v1alpha1.CloudStackMachineConfigSpec, values map[string]interface{}) (content []byte, err error) {
    machineTemplateName, ok := values[etcdTemplateNameKey]
    if !ok {
        return nil, fmt.Errorf("unable to determine etcd template name")
    }
    machineTemplate := MachineTemplate(fmt.Sprintf("%s", machineTemplateName), machineSpec)
    machineTemplateBytes, err := templater.ObjectsToYaml(machineTemplate)
    if err != nil {
        return nil, fmt.Errorf("marshalling etcd machine template to byte array: %v", err)
    }
    return machineTemplateBytes, nil
}

func fillDiskOffering(values map[string]interface{}, diskOffering *v1alpha1.CloudStackResourceDiskOffering, machineType string) {
    if diskOffering != nil {
        values[fmt.Sprintf("cloudstack%sDiskOfferingProvided", machineType)] = len(diskOffering.Id) > 0 || len(diskOffering.Name) > 0
        values[fmt.Sprintf("cloudstack%sDiskOfferingId", machineType)] = diskOffering.Id
        values[fmt.Sprintf("cloudstack%sDiskOfferingName", machineType)] = diskOffering.Name
        values[fmt.Sprintf("cloudstack%sDiskOfferingCustomSize", machineType)] = diskOffering.CustomSize
        values[fmt.Sprintf("cloudstack%sDiskOfferingPath", machineType)] = diskOffering.MountPath
        values[fmt.Sprintf("cloudstack%sDiskOfferingDevice", machineType)] = diskOffering.Device
        values[fmt.Sprintf("cloudstack%sDiskOfferingFilesystem", machineType)] = diskOffering.Filesystem
        values[fmt.Sprintf("cloudstack%sDiskOfferingLabel", machineType)] = diskOffering.Label
    } else {
        values[fmt.Sprintf("cloudstack%sDiskOfferingProvided", machineType)] = false
    }
}

func fillProxyConfigurations(values map[string]interface{}, clusterSpec *cluster.Spec, controlPlaneEndpoint string) {
    datacenterConfigSpec := clusterSpec.CloudStackDatacenter.Spec
    values["proxyConfig"] = true
    capacity := len(clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks) +
        len(clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks) +
        len(clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy) + 4
    noProxyList := make([]string, 0, capacity)
    noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks...)
    noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks...)
    noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy...)
    noProxyList = append(noProxyList, clusterapi.NoProxyDefaults()...)
    for _, az := range datacenterConfigSpec.AvailabilityZones {
        if cloudStackManagementAPIEndpointHostname, err := v1alpha1.GetCloudStackManagementAPIEndpointHostname(az); err == nil {
            noProxyList = append(noProxyList, cloudStackManagementAPIEndpointHostname)
        }
    }
    noProxyList = append(noProxyList, controlPlaneEndpoint)

    values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy
    values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy
    values["noProxy"] = noProxyList
}

func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration) (map[string]interface{}, error) {
    bundle := clusterSpec.VersionsBundle
    format := "cloud-config"

    kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
        Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)).
        Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf))

    workerNodeGroupMachineSpec := workerMachineConfig(clusterSpec, workerNodeGroupConfiguration).Spec
    workerUser := workerNodeGroupMachineSpec.Users[0]
    workerSSHKey, err := common.StripSshAuthorizedKeyComment(workerUser.SshAuthorizedKeys[0])
    if err != nil {
        return nil, fmt.Errorf("formatting ssh key for cloudstack worker template: %v", err)
    }

    values := map[string]interface{}{
        "clusterName":                      clusterSpec.Cluster.Name,
        "kubernetesVersion":                bundle.KubeDistro.Kubernetes.Tag,
        "cloudstackAnnotationSuffix":       constants.CloudstackAnnotationSuffix,
        "cloudstackTemplateId":             workerNodeGroupMachineSpec.Template.Id,
        "cloudstackTemplateName":           workerNodeGroupMachineSpec.Template.Name,
        "cloudstackOfferingId":             workerNodeGroupMachineSpec.ComputeOffering.Id,
        "cloudstackOfferingName":           workerNodeGroupMachineSpec.ComputeOffering.Name,
        "cloudstackCustomDetails":          workerNodeGroupMachineSpec.UserCustomDetails,
        "cloudstackSymlinks":               workerNodeGroupMachineSpec.Symlinks,
        "cloudstackAffinity":               workerNodeGroupMachineSpec.Affinity,
        "cloudstackAffinityGroupIds":       workerNodeGroupMachineSpec.AffinityGroupIds,
        "workerReplicas":                   *workerNodeGroupConfiguration.Count,
        "workerSshUsername":                workerNodeGroupMachineSpec.Users[0].Name,
        "cloudstackWorkerSshAuthorizedKey": workerSSHKey,
        "format":                           format,
        "kubeletExtraArgs":                 kubeletExtraArgs.ToPartialYaml(),
        "eksaSystemNamespace":              constants.EksaSystemNamespace,
        "workerNodeGroupName":              fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name),
        "workerNodeGroupTaints":            workerNodeGroupConfiguration.Taints,
    }

    fillDiskOffering(values, workerNodeGroupMachineSpec.DiskOffering, "")
    values["cloudstackAnnotations"] = values["cloudstackDiskOfferingProvided"].(bool) || len(workerNodeGroupMachineSpec.Symlinks) > 0

    if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
        registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
        values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
        values["mirrorBase"] = registryMirror.BaseRegistry
        values["insecureSkip"] = registryMirror.InsecureSkipVerify
        if len(registryMirror.CACertContent) > 0 {
            values["registryCACert"] = registryMirror.CACertContent
        }
    }

    if clusterSpec.Cluster.Spec.ProxyConfiguration != nil {
        endpoint, err := controlPlaneEndpointHost(clusterSpec)
        if err != nil {
            return nil, err
        }
        fillProxyConfigurations(values, clusterSpec, endpoint)
    }

    if workerNodeGroupConfiguration.UpgradeRolloutStrategy != nil {
        values["upgradeRolloutStrategy"] = true
        values["maxSurge"] = workerNodeGroupConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
        values["maxUnavailable"] = workerNodeGroupConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxUnavailable
    }

    return values, nil
}

func getEtcdMachineSpec(clusterSpec v1alpha1.ClusterSpec, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig) *v1alpha1.CloudStackMachineConfigSpec {
    var etcdMachineSpec *v1alpha1.CloudStackMachineConfigSpec
    if clusterSpec.ExternalEtcdConfiguration != nil {
        if clusterSpec.ExternalEtcdConfiguration.MachineGroupRef != nil && machineConfigs[clusterSpec.ExternalEtcdConfiguration.MachineGroupRef.Name] != nil {
            etcdMachineSpec = &machineConfigs[clusterSpec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec
        }
    }
    return etcdMachineSpec
}

func getControlPlaneMachineSpec(clusterSpec v1alpha1.ClusterSpec, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig) *v1alpha1.CloudStackMachineConfigSpec {
    var controlPlaneMachineSpec *v1alpha1.CloudStackMachineConfigSpec
    if clusterSpec.ControlPlaneConfiguration.MachineGroupRef != nil && machineConfigs[clusterSpec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil {
        controlPlaneMachineSpec = &machineConfigs[clusterSpec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec
    }
    return controlPlaneMachineSpec
}
```
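A sketch of how a caller might drive the builder, based on the usage visible in the tests below: build options are plain functions over the values map, which is how callers inject the rotating machine-template names before rendering. The template names here are made up, and loading the cluster spec is elided:

```go
package example

import (
    "time"

    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)

// generateControlPlane renders the control plane CAPI objects for a spec.
// The two keys set below are the ones buildControlPlaneTemplate and
// buildEtcdTemplate look up to name the rendered CloudStackMachineTemplates.
func generateControlPlane(spec *cluster.Spec) ([]byte, error) {
    builder := cloudstack.NewTemplateBuilder(time.Now)
    return builder.GenerateCAPISpecControlPlane(spec,
        func(values map[string]interface{}) {
            values["controlPlaneTemplateName"] = "my-cluster-control-plane-1" // hypothetical name
            values["etcdTemplateName"] = "my-cluster-etcd-1"                  // hypothetical name
        },
    )
}
```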
aws/eks-anywhere · Go · 153 lines

```go
package cloudstack_test

import (
    "path"
    "testing"
    "time"

    . "github.com/onsi/gomega"

    "github.com/aws/eks-anywhere/internal/test"
    "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/clusterapi"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)

const (
    testClusterConfigMainFilename        = "cluster_main.yaml"
    testClusterConfigMainWithAZsFilename = "cluster_main_with_availability_zones.yaml"
    testDataDir                          = "testdata"
)

func TestTemplateBuilderGenerateCAPISpecControlPlaneNilDatacenter(t *testing.T) {
    g := NewWithT(t)
    templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
    clusterSpec.CloudStackDatacenter = nil
    _, err := templateBuilder.GenerateCAPISpecControlPlane(clusterSpec)
    g.Expect(err).To(MatchError(ContainSubstring("provided clusterSpec CloudStackDatacenter is nil. Unable to generate CAPI spec control plane")))
}

func TestTemplateBuilderGenerateCAPISpecControlPlaneNoKubernetesVersion(t *testing.T) {
    g := NewWithT(t)
    templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
    clusterSpec.Cluster.Spec.KubernetesVersion = ""
    _, err := templateBuilder.GenerateCAPISpecControlPlane(clusterSpec)
    g.Expect(err).To(MatchError(ContainSubstring("error building template map from CP error converting kubeVersion")))
}

func TestTemplateBuilderGenerateCAPISpecControlPlaneMissingNames(t *testing.T) {
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))

    tests := []struct {
        name        string
        buildOption func(values map[string]interface{})
        wantErr     string
    }{
        {
            name: "missing control plane template name",
            buildOption: func(values map[string]interface{}) {
                values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(clusterSpec.Cluster)
            },
            wantErr: "unable to determine control plane template name",
        },
        {
            name: "missing etcd machine template name",
            buildOption: func(values map[string]interface{}) {
                values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster)
            },
            wantErr: "",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            g := NewWithT(t)
            templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
            _, err := templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, tt.buildOption)
            g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
        })
    }
}

func TestTemplateBuilderGenerateCAPISpecControlPlaneInvalidSSHKey(t *testing.T) {
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
    controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
    etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
    tests := []struct {
        name              string
        machineConfigName string
        wantErr           string
    }{
        {
            name:              "invalid controlplane ssh key",
            machineConfigName: controlPlaneMachineConfigName,
            wantErr:           "formatting ssh key for cloudstack control plane template: ssh",
        },
        {
            name:              "invalid etcd ssh key",
            machineConfigName: etcdMachineConfigName,
            wantErr:           "formatting ssh key for cloudstack etcd template: ssh",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            g := NewWithT(t)
            spec := clusterSpec.DeepCopy()
            templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
            machineConfig := spec.CloudStackMachineConfigs[tt.machineConfigName]
            machineConfig.Spec.Users[0].SshAuthorizedKeys[0] = "ssh-rsa AAAA B3NzaC1K73CeQ== [email protected]"
            _, err := templateBuilder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) {
                values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster)
                values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(clusterSpec.Cluster)
            })
            g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
        })
    }
}

func TestTemplateBuilderGenerateCAPISpecControlPlaneInvalidEndpoint(t *testing.T) {
    g := NewWithT(t)
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
    clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "1.1.1.1::"
    templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
    _, err := templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, func(values map[string]interface{}) {
        values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster)
        values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(clusterSpec.Cluster)
    })
    g.Expect(err).To(MatchError(ContainSubstring("error building template map from CP host 1.1.1.1:: is invalid: address 1.1.1.1::: too many colons in address")))
}

func TestTemplateBuilderGenerateCAPISpecWorkersInvalidSSHKey(t *testing.T) {
    g := NewWithT(t)
    templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
    firstMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
    machineConfig := clusterSpec.CloudStackMachineConfigs[firstMachineConfigName]
    machineConfig.Spec.Users[0].SshAuthorizedKeys[0] = "ssh-rsa AAAA B3NzaC1K73CeQ== [email protected]"
    machineTemplateNames, kubeadmConfigTemplateNames := clusterapi.InitialTemplateNamesForWorkers(clusterSpec)
    _, err := templateBuilder.GenerateCAPISpecWorkers(clusterSpec, machineTemplateNames, kubeadmConfigTemplateNames)
    g.Expect(err).To(
        MatchError(ContainSubstring("formatting ssh key for cloudstack worker template: ssh")),
    )
}

func TestTemplateBuilderGenerateCAPISpecWorkersInvalidEndpoint(t *testing.T) {
    g := NewWithT(t)
    templateBuilder := cloudstack.NewTemplateBuilder(time.Now)
    clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
    clusterSpec.Cluster.Spec.ProxyConfiguration = &v1alpha1.ProxyConfiguration{}
    clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "1.1.1.1::"
    machineTemplateNames, kubeadmConfigTemplateNames := clusterapi.InitialTemplateNamesForWorkers(clusterSpec)
    _, err := templateBuilder.GenerateCAPISpecWorkers(clusterSpec, machineTemplateNames, kubeadmConfigTemplateNames)
    g.Expect(err).To(MatchError(ContainSubstring("building template map for MD host 1.1.1.1:: is invalid: address 1.1.1.1::: too many colons in address")))
}
```
aws/eks-anywhere · Go · 223 lines

```go
package cloudstack

import (
    "context"
    "errors"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/constants"
    "github.com/aws/eks-anywhere/pkg/logger"
    "github.com/aws/eks-anywhere/pkg/networkutils"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
    "github.com/aws/eks-anywhere/pkg/types"
)

type Validator struct {
    cmk         ProviderCmkClient
    netClient   networkutils.NetClient
    skipIpCheck bool
}

func NewValidator(cmk ProviderCmkClient, netClient networkutils.NetClient, skipIpCheck bool) *Validator {
    return &Validator{
        cmk:         cmk,
        netClient:   netClient,
        skipIpCheck: skipIpCheck,
    }
}

type localAvailabilityZone struct {
    *anywherev1.CloudStackAvailabilityZone
    ZoneId   string
    DomainId string
}

// ProviderCmkClient defines the methods used by Cmk as a separate interface to be mockable when injected into other objects.
type ProviderCmkClient interface {
    GetManagementApiEndpoint(profile string) (string, error)
    ValidateServiceOfferingPresent(ctx context.Context, profile string, zoneId string, serviceOffering anywherev1.CloudStackResourceIdentifier) error
    ValidateDiskOfferingPresent(ctx context.Context, profile string, zoneId string, diskOffering anywherev1.CloudStackResourceDiskOffering) error
    ValidateTemplatePresent(ctx context.Context, profile string, domainId string, zoneId string, account string, template anywherev1.CloudStackResourceIdentifier) error
    ValidateAffinityGroupsPresent(ctx context.Context, profile string, domainId string, account string, affinityGroupIds []string) error
    ValidateZoneAndGetId(ctx context.Context, profile string, zone anywherev1.CloudStackZone) (string, error)
    ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string) error
    ValidateDomainAndGetId(ctx context.Context, profile string, domain string) (string, error)
    ValidateAccountPresent(ctx context.Context, profile string, account string, domainId string) error
}

func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error {
    localAvailabilityZones, err := generateLocalAvailabilityZones(ctx, datacenterConfig)
    if err != nil {
        return err
    }

    for _, az := range localAvailabilityZones {
        endpoint, err := v.cmk.GetManagementApiEndpoint(az.CredentialsRef)
        if err != nil {
            return err
        }
        if endpoint != az.ManagementApiEndpoint {
            return fmt.Errorf("cloudstack secret management url (%s) differs from cluster spec management url (%s)", endpoint, az.ManagementApiEndpoint)
        }

        domainId, err := v.cmk.ValidateDomainAndGetId(ctx, az.CredentialsRef, az.Domain)
        if err != nil {
            return err
        }
        az.DomainId = domainId

        if err := v.cmk.ValidateAccountPresent(ctx, az.CredentialsRef, az.Account, az.DomainId); err != nil {
            return err
        }

        zoneId, err := v.cmk.ValidateZoneAndGetId(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone)
        if err != nil {
            return err
        }
        if err := v.cmk.ValidateNetworkPresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Network, zoneId, az.Account); err != nil {
            return err
        }
    }

    logger.MarkPass("Datacenter validated")
    return nil
}

func generateLocalAvailabilityZones(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) ([]localAvailabilityZone, error) {
    localAvailabilityZones := []localAvailabilityZone{}
    if datacenterConfig == nil {
        return nil, errors.New("CloudStack Datacenter Config is null")
    }
    for _, az := range datacenterConfig.Spec.AvailabilityZones {
        // Copy the loop variable before taking its address: otherwise every
        // element would alias the same variable (pre-Go 1.22 semantics) and
        // all zones would point at the last entry.
        az := az
        availabilityZone := localAvailabilityZone{
            CloudStackAvailabilityZone: &az,
        }
        localAvailabilityZones = append(localAvailabilityZones, availabilityZone)
    }
    if len(localAvailabilityZones) <= 0 {
        return nil, fmt.Errorf("CloudStackDatacenterConfig domain or availabilityZones is not set or is empty")
    }
    return localAvailabilityZones, nil
}

// TODO: dry out machine configs validations.
// Cyclomatic complexity is high. The exception below can probably be removed once the above todo is done.
// nolint:gocyclo
func (v *Validator) ValidateClusterMachineConfigs(ctx context.Context, clusterSpec *cluster.Spec) error {
    controlPlaneMachineConfig := controlPlaneMachineConfig(clusterSpec)
    if controlPlaneMachineConfig == nil {
        return fmt.Errorf("cannot find CloudStackMachineConfig %v for control plane", clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name)
    }

    if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
        etcdMachineConfig := etcdMachineConfig(clusterSpec)
        if etcdMachineConfig == nil {
            return fmt.Errorf("cannot find CloudStackMachineConfig %v for etcd machines", clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name)
        }
        if etcdMachineConfig.Spec.Template != controlPlaneMachineConfig.Spec.Template {
            return fmt.Errorf("control plane and etcd machines must have the same template specified")
        }
    }

    for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
        workerNodeGroupMachineConfig, ok := clusterSpec.CloudStackMachineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name]
        if !ok {
            return fmt.Errorf("cannot find CloudStackMachineConfig %v for worker nodes", workerNodeGroupConfiguration.MachineGroupRef.Name)
        }
        if controlPlaneMachineConfig.Spec.Template != workerNodeGroupMachineConfig.Spec.Template {
            return fmt.Errorf("control plane and worker nodes must have the same template specified")
        }
    }

    for _, machineConfig := range clusterSpec.CloudStackMachineConfigs {
        if err := v.validateMachineConfig(ctx, clusterSpec.CloudStackDatacenter, machineConfig); err != nil {
            return fmt.Errorf("machine config %s validation failed: %v", machineConfig.Name, err)
        }
    }

    logger.MarkPass("Validated cluster Machine Configs")
    return nil
}

func (v *Validator) ValidateControlPlaneEndpointUniqueness(endpoint string) error {
    if v.skipIpCheck {
        logger.Info("Skipping control plane endpoint uniqueness check")
        return nil
    }
    host, port, err := getValidControlPlaneHostPort(endpoint)
    if err != nil {
        return fmt.Errorf("invalid endpoint: %v", err)
    }
    if networkutils.IsPortInUse(v.netClient, host, port) {
        return fmt.Errorf("endpoint <%s> is already in use", endpoint)
    }
    return nil
}

func (v *Validator) validateMachineConfig(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig, machineConfig *anywherev1.CloudStackMachineConfig) error {
    localAvailabilityZones, err := generateLocalAvailabilityZones(ctx, datacenterConfig)
    if err != nil {
        return err
    }

    for _, az := range localAvailabilityZones {
        zoneId, err := v.cmk.ValidateZoneAndGetId(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone)
        if err != nil {
            return err
        }

        if err := v.cmk.ValidateTemplatePresent(ctx, az.CredentialsRef, az.DomainId, zoneId, az.Account, machineConfig.Spec.Template); err != nil {
            return fmt.Errorf("validating template: %v", err)
        }
        if err := v.cmk.ValidateServiceOfferingPresent(ctx, az.CredentialsRef, zoneId, machineConfig.Spec.ComputeOffering); err != nil {
            return fmt.Errorf("validating service offering: %v", err)
        }
        if machineConfig.Spec.DiskOffering != nil && (len(machineConfig.Spec.DiskOffering.Id) > 0 || len(machineConfig.Spec.DiskOffering.Name) > 0) {
            if err := v.cmk.ValidateDiskOfferingPresent(ctx, az.CredentialsRef, zoneId, *machineConfig.Spec.DiskOffering); err != nil {
                return fmt.Errorf("validating disk offering: %v", err)
            }
        }
        if len(machineConfig.Spec.AffinityGroupIds) > 0 {
            if err := v.cmk.ValidateAffinityGroupsPresent(ctx, az.CredentialsRef, az.DomainId, az.Account, machineConfig.Spec.AffinityGroupIds); err != nil {
                return fmt.Errorf("validating affinity group ids: %v", err)
            }
        }
    }

    return nil
}

// ValidateSecretsUnchanged checks that the CloudStack credential secrets on the cluster have not drifted from the exec config profiles.
func (v *Validator) ValidateSecretsUnchanged(ctx context.Context, cluster *types.Cluster, execConfig *decoder.CloudStackExecConfig, client ProviderKubectlClient) error {
    for _, profile := range execConfig.Profiles {
        secret, err := client.GetSecretFromNamespace(ctx, cluster.KubeconfigFile, profile.Name, constants.EksaSystemNamespace)
        if apierrors.IsNotFound(err) {
            // When the secret is not found we allow for new secrets
            continue
        }
        if err != nil {
            return fmt.Errorf("getting secret for profile %s: %v", profile.Name, err)
        }
        if secretDifferentFromProfile(secret, profile) {
            return fmt.Errorf("profile '%s' is different from the secret", profile.Name)
        }
    }
    return nil
}

func secretDifferentFromProfile(secret *corev1.Secret, profile decoder.CloudStackProfileConfig) bool {
    return string(secret.Data[decoder.APIUrlKey]) != profile.ManagementUrl ||
        string(secret.Data[decoder.APIKeyKey]) != profile.ApiKey ||
        string(secret.Data[decoder.SecretKeyKey]) != profile.SecretKey ||
        string(secret.Data[decoder.VerifySslKey]) != profile.VerifySsl
}
```
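A minimal sketch of sequencing the validator's checks, in the order the file above implies: the datacenter check first, then the machine-config checks, then the endpoint probe. It assumes a concrete `ProviderCmkClient` is supplied by the caller (obtaining one is out of scope here):

```go
package example

import (
    "context"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/networkutils"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)

// validateAll runs the provider validations against a datacenter config and
// cluster spec. skipIpCheck is false so the endpoint uniqueness probe runs.
func validateAll(ctx context.Context, cmk cloudstack.ProviderCmkClient, dc *anywherev1.CloudStackDatacenterConfig, spec *cluster.Spec) error {
    v := cloudstack.NewValidator(cmk, &networkutils.DefaultNetClient{}, false)
    if err := v.ValidateCloudStackDatacenterConfig(ctx, dc); err != nil {
        return err
    }
    if err := v.ValidateClusterMachineConfigs(ctx, spec); err != nil {
        return err
    }
    return v.ValidateControlPlaneEndpointUniqueness(spec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host)
}
```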
aws/eks-anywhere · Go · 134 lines

```go
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/providers/cloudstack (interfaces: ProviderValidator,ValidatorRegistry)

// Package cloudstack is a generated GoMock package.
package cloudstack

import (
    context "context"
    reflect "reflect"

    v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    cluster "github.com/aws/eks-anywhere/pkg/cluster"
    decoder "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
    types "github.com/aws/eks-anywhere/pkg/types"
    gomock "github.com/golang/mock/gomock"
)

// MockProviderValidator is a mock of ProviderValidator interface.
type MockProviderValidator struct {
    ctrl     *gomock.Controller
    recorder *MockProviderValidatorMockRecorder
}

// MockProviderValidatorMockRecorder is the mock recorder for MockProviderValidator.
type MockProviderValidatorMockRecorder struct {
    mock *MockProviderValidator
}

// NewMockProviderValidator creates a new mock instance.
func NewMockProviderValidator(ctrl *gomock.Controller) *MockProviderValidator {
    mock := &MockProviderValidator{ctrl: ctrl}
    mock.recorder = &MockProviderValidatorMockRecorder{mock}
    return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderValidator) EXPECT() *MockProviderValidatorMockRecorder {
    return m.recorder
}

// ValidateCloudStackDatacenterConfig mocks base method.
func (m *MockProviderValidator) ValidateCloudStackDatacenterConfig(arg0 context.Context, arg1 *v1alpha1.CloudStackDatacenterConfig) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ValidateCloudStackDatacenterConfig", arg0, arg1)
    ret0, _ := ret[0].(error)
    return ret0
}

// ValidateCloudStackDatacenterConfig indicates an expected call of ValidateCloudStackDatacenterConfig.
func (mr *MockProviderValidatorMockRecorder) ValidateCloudStackDatacenterConfig(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCloudStackDatacenterConfig", reflect.TypeOf((*MockProviderValidator)(nil).ValidateCloudStackDatacenterConfig), arg0, arg1)
}

// ValidateClusterMachineConfigs mocks base method.
func (m *MockProviderValidator) ValidateClusterMachineConfigs(arg0 context.Context, arg1 *cluster.Spec) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ValidateClusterMachineConfigs", arg0, arg1)
    ret0, _ := ret[0].(error)
    return ret0
}

// ValidateClusterMachineConfigs indicates an expected call of ValidateClusterMachineConfigs.
func (mr *MockProviderValidatorMockRecorder) ValidateClusterMachineConfigs(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateClusterMachineConfigs", reflect.TypeOf((*MockProviderValidator)(nil).ValidateClusterMachineConfigs), arg0, arg1)
}

// ValidateControlPlaneEndpointUniqueness mocks base method.
func (m *MockProviderValidator) ValidateControlPlaneEndpointUniqueness(arg0 string) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ValidateControlPlaneEndpointUniqueness", arg0)
    ret0, _ := ret[0].(error)
    return ret0
}

// ValidateControlPlaneEndpointUniqueness indicates an expected call of ValidateControlPlaneEndpointUniqueness.
func (mr *MockProviderValidatorMockRecorder) ValidateControlPlaneEndpointUniqueness(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneEndpointUniqueness", reflect.TypeOf((*MockProviderValidator)(nil).ValidateControlPlaneEndpointUniqueness), arg0)
}

// ValidateSecretsUnchanged mocks base method.
func (m *MockProviderValidator) ValidateSecretsUnchanged(arg0 context.Context, arg1 *types.Cluster, arg2 *decoder.CloudStackExecConfig, arg3 ProviderKubectlClient) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "ValidateSecretsUnchanged", arg0, arg1, arg2, arg3)
    ret0, _ := ret[0].(error)
    return ret0
}

// ValidateSecretsUnchanged indicates an expected call of ValidateSecretsUnchanged.
func (mr *MockProviderValidatorMockRecorder) ValidateSecretsUnchanged(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateSecretsUnchanged", reflect.TypeOf((*MockProviderValidator)(nil).ValidateSecretsUnchanged), arg0, arg1, arg2, arg3)
}

// MockValidatorRegistry is a mock of ValidatorRegistry interface.
type MockValidatorRegistry struct {
    ctrl     *gomock.Controller
    recorder *MockValidatorRegistryMockRecorder
}

// MockValidatorRegistryMockRecorder is the mock recorder for MockValidatorRegistry.
type MockValidatorRegistryMockRecorder struct {
    mock *MockValidatorRegistry
}

// NewMockValidatorRegistry creates a new mock instance.
func NewMockValidatorRegistry(ctrl *gomock.Controller) *MockValidatorRegistry {
    mock := &MockValidatorRegistry{ctrl: ctrl}
    mock.recorder = &MockValidatorRegistryMockRecorder{mock}
    return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockValidatorRegistry) EXPECT() *MockValidatorRegistryMockRecorder {
    return m.recorder
}

// Get mocks base method.
func (m *MockValidatorRegistry) Get(arg0 *decoder.CloudStackExecConfig) (ProviderValidator, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Get", arg0)
    ret0, _ := ret[0].(ProviderValidator)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockValidatorRegistryMockRecorder) Get(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockValidatorRegistry)(nil).Get), arg0)
}
```
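Since this file is generated, its intended use is only visible at call sites. A short sketch of wiring the mock into a gomock-based test; the endpoint string is arbitrary:

```go
package cloudstack

import (
    "testing"

    "github.com/golang/mock/gomock"
)

func TestProviderValidatorMockSketch(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish() // verifies every recorded expectation was met

    validator := NewMockProviderValidator(ctrl)
    // EXPECT() records an expectation; Return(nil) stubs a passing validation.
    validator.EXPECT().ValidateControlPlaneEndpointUniqueness("1.2.3.4:6443").Return(nil)

    if err := validator.ValidateControlPlaneEndpointUniqueness("1.2.3.4:6443"); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}
```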
aws/eks-anywhere · Go · 58 lines

```go
package cloudstack

import (
    "context"
    "fmt"

    anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
    "github.com/aws/eks-anywhere/pkg/cluster"
    "github.com/aws/eks-anywhere/pkg/filewriter"
    "github.com/aws/eks-anywhere/pkg/networkutils"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
    "github.com/aws/eks-anywhere/pkg/types"
)

// ValidatorRegistry exposes a single method for retrieving the CloudStack validator, and abstracts away how it is injected.
type ValidatorRegistry interface {
    Get(execConfig *decoder.CloudStackExecConfig) (ProviderValidator, error)
}

// CmkBuilder defines the interface to be consumed by the ValidatorFactory which enables it to build a new CloudStackClient.
type CmkBuilder interface {
    BuildCloudstackClient(writer filewriter.FileWriter, config *decoder.CloudStackExecConfig) (ProviderCmkClient, error)
}

// ValidatorFactory implements the ValidatorRegistry interface and holds the necessary structs for building fresh Validator objects.
type ValidatorFactory struct {
    builder     CmkBuilder
    writer      filewriter.FileWriter
    skipIPCheck bool
}

// ProviderValidator exposes a common interface to avoid coupling on implementation details and to support mocking.
type ProviderValidator interface {
    ValidateCloudStackDatacenterConfig(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error
    ValidateClusterMachineConfigs(ctx context.Context, clusterSpec *cluster.Spec) error
    ValidateControlPlaneEndpointUniqueness(endpoint string) error
    ValidateSecretsUnchanged(ctx context.Context, cluster *types.Cluster, execConfig *decoder.CloudStackExecConfig, client ProviderKubectlClient) error
}

// NewValidatorFactory initializes a factory for the CloudStack provider validator.
func NewValidatorFactory(builder CmkBuilder, writer filewriter.FileWriter, skipIPCheck bool) ValidatorFactory {
    return ValidatorFactory{
        builder:     builder,
        writer:      writer,
        skipIPCheck: skipIPCheck,
    }
}

// Get returns a validator for a particular cloudstack exec config.
func (r ValidatorFactory) Get(execConfig *decoder.CloudStackExecConfig) (ProviderValidator, error) {
    cmk, err := r.builder.BuildCloudstackClient(r.writer, execConfig)
    if err != nil {
        return nil, fmt.Errorf("building cmk executable: %v", err)
    }

    return NewValidator(cmk, &networkutils.DefaultNetClient{}, r.skipIPCheck), nil
}
```

The tests in the next file demonstrate how this factory is constructed and queried.
aws/eks-anywhere · Go · 37 lines

```go
package cloudstack_test

import (
    "testing"

    . "github.com/onsi/gomega"

    "github.com/aws/eks-anywhere/pkg/executables"
    "github.com/aws/eks-anywhere/pkg/executables/cmk"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack"
    "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)

func TestRegistryGetWithNilExecConfig(t *testing.T) {
    g := NewGomegaWithT(t)
    executableBuilder := executables.NewLocalExecutablesBuilder()
    registry := cloudstack.NewValidatorFactory(cmk.NewCmkBuilder(executableBuilder), nil, false)
    _, err := registry.Get(nil)
    g.Expect(err).NotTo(BeNil())
}

func TestRegistryGetSuccess(t *testing.T) {
    g := NewGomegaWithT(t)
    executableBuilder := executables.NewLocalExecutablesBuilder()
    registry := cloudstack.NewValidatorFactory(cmk.NewCmkBuilder(executableBuilder), nil, false)
    validator, err := registry.Get(&decoder.CloudStackExecConfig{
        Profiles: []decoder.CloudStackProfileConfig{
            {
                Name:          "test",
                ApiKey:        "apikey",
                SecretKey:     "secretKey",
                ManagementUrl: "test-url",
            },
        },
    })
    g.Expect(err).To(BeNil())
    g.Expect(validator).ToNot(BeNil())
}
```
eks-anywhere
aws
Go
package cloudstack import ( "context" _ "embed" "errors" "net" "path" "testing" "time" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" apierrors "k8s.io/apimachinery/pkg/api/errors" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/mocks" "github.com/aws/eks-anywhere/pkg/types" ) const ( testClusterConfigMainFilename = "cluster_main.yaml" testClusterConfigMainWithAZsFilename = "cluster_main_with_availability_zones.yaml" testDataDir = "testdata" ) type DummyNetClient struct{} func (n *DummyNetClient) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { // add dummy case for coverage if address == "255.255.255.255:6443" { return &net.IPConn{}, nil } return nil, errors.New("") } var testTemplate = v1alpha1.CloudStackResourceIdentifier{ Name: "centos7-k8s-118", } var testOffering = v1alpha1.CloudStackResourceIdentifier{ Name: "m4-large", } func thenErrorExpected(t *testing.T, expected string, err error) { if err == nil { t.Fatalf("Expected=<%s> actual=<nil>", expected) } actual := err.Error() if expected != actual { t.Fatalf("Expected=<%s> actual=<%s>", expected, actual) } } func TestValidateCloudStackDatacenterConfig(t *testing.T) { ctx := context.Background() setupContext(t) cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, true) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") } setupMockForAvailabilityZonesValidation(cmk, ctx, datacenterConfig.Spec.AvailabilityZones) err = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) if err != nil { t.Fatalf("failed to validate CloudStackDataCenterConfig: %v", err) } } func TestValidateCloudStackDatacenterConfigWithAZ(t *testing.T) { ctx := context.Background() setupContext(t) cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, true) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainWithAZsFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") } setupMockForAvailabilityZonesValidation(cmk, ctx, datacenterConfig.Spec.AvailabilityZones) err = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) if err != nil { t.Fatalf("failed to validate CloudStackDataCenterConfig: %v", err) } } func TestValidateSkipControlPlaneIpCheck(t *testing.T) { cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, true) if err := validator.ValidateControlPlaneEndpointUniqueness("invalid_url_skip_check"); err != nil { t.Fatalf("expected no error, validation should be skipped") } } func TestValidateControlPlaneIpCheck(t *testing.T) { cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, false) err := validator.ValidateControlPlaneEndpointUniqueness("255.255.255.255:6443") thenErrorExpected(t, "endpoint <255.255.255.255:6443> is already in use", err) } func TestValidateControlPlaneIpCheckUniqueIpSuccess(t *testing.T) { cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, false) if err := 
validator.ValidateControlPlaneEndpointUniqueness("1.1.1.1:6443"); err != nil { t.Fatalf("Expected endpoint to be valid and unused") } } func TestValidateControlPlaneIpCheckUniqueIpInvalidEndpointPort(t *testing.T) { cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, false) err := validator.ValidateControlPlaneEndpointUniqueness("1.1.1.1:") thenErrorExpected(t, "invalid endpoint: host 1.1.1.1: has an invalid port", err) } func TestValidateControlPlaneIpCheckUniqueIpInvalidEndpointHost(t *testing.T) { cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) validator := NewValidator(cmk, &DummyNetClient{}, false) err := validator.ValidateControlPlaneEndpointUniqueness("invalid::host") thenErrorExpected(t, "invalid endpoint: host invalid::host is invalid: address invalid::host: too many colons in address", err) } func TestValidateDatacenterInconsistentManagementEndpoints(t *testing.T) { ctx := context.Background() setupContext(t) cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones[0].ManagementApiEndpoint = "abcefg.com" err := validator.ValidateCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter) thenErrorExpected(t, "cloudstack secret management url (http://127.16.0.1:8080/client/api) differs from cluster spec management url (abcefg.com)", err) } func TestSetupAndValidateUsersNil(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users = nil workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users = nil etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users = nil setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter) err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec) if err != nil { t.Fatalf("validator.ValidateClusterMachineConfigs() err = %v, want err = nil", err) } } func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, 
path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil workerNodeMachineConfigName := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[workerNodeMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter) err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec) if err != nil { t.Fatalf("validator.ValidateClusterMachineConfigs() err = %v, want err = nil", err) } } //func setupMockForAvailabilityZonesValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, datacenterConfig *v1alpha1.CloudStackDatacenterConfig) { // if len(datacenterConfig.Spec.Zones) > 0 { // cmk.EXPECT().ValidateZoneAndGetId(ctx, gomock.Any(), datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) // } // cmk.EXPECT().ValidateDomainAndGetId(ctx, gomock.Any(), datacenterConfig.Spec.Domain).AnyTimes().Return("5300cdac-74d5-11ec-8696-c81f66d3e965", nil) // cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) // cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) // cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().MaxTimes(1).Return("http://127.16.0.1:8080/client/api", nil) //} func setupMockForAvailabilityZonesValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, azs []v1alpha1.CloudStackAvailabilityZone) { for _, az := range azs { cmk.EXPECT().ValidateZoneAndGetId(ctx, gomock.Any(), az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil) cmk.EXPECT().ValidateDomainAndGetId(ctx, gomock.Any(), az.Domain).AnyTimes().Return("5300cdac-74d5-11ec-8696-c81f66d3e962", nil) cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), az.Account, gomock.Any()).AnyTimes().Return(nil) cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) cmk.EXPECT().GetManagementApiEndpoint(az.CredentialsRef).AnyTimes().Return(az.ManagementApiEndpoint, nil) } } func TestSetupAndValidateCreateClusterCPMachineGroupRefNonexistent(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) 
clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "nonexistent" setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec) thenErrorExpected(t, "cannot find CloudStackMachineConfig nonexistent for control plane", err) } func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNonexistent(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = "nonexistent" setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec) thenErrorExpected(t, "cannot find CloudStackMachineConfig nonexistent for worker nodes", err) } func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNonexistent(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "nonexistent" setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec) thenErrorExpected(t, "cannot find CloudStackMachineConfig nonexistent for etcd machines", err) } func TestSetupAndValidateCreateClusterTemplateDifferent(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") } controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name clusterSpec.CloudStackMachineConfigs[controlPlaneMachineConfigName].Spec.Template = v1alpha1.CloudStackResourceIdentifier{Name: "different"} setupMockForAvailabilityZonesValidation(cmk, ctx, datacenterConfig.Spec.AvailabilityZones) err = validator.ValidateClusterMachineConfigs(ctx, clusterSpec) thenErrorExpected(t, "control plane and etcd machines must have the same template specified", err) } func TestValidateMachineConfigsHappyCase(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) validator := NewValidator(cmk, &DummyNetClient{}, true) setupMockForAvailabilityZonesValidation(cmk, ctx, clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones) cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones[0].Account, testTemplate).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) 
cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), gomock.Any(), clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones[0].Account, gomock.Any()).Times(3)
	_ = validator.ValidateCloudStackDatacenterConfig(ctx, clusterSpec.CloudStackDatacenter)

	err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec)
	assert.Nil(t, err)
}

func TestValidateCloudStackMachineConfig(t *testing.T) {
	ctx := context.Background()
	cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t))
	machineConfigs, err := v1alpha1.GetCloudStackMachineConfigs(path.Join(testDataDir, testClusterConfigMainFilename))
	if err != nil {
		t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename)
	}
	datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename))
	if err != nil {
		t.Fatalf("unable to get datacenter config from file")
	}
	validator := NewValidator(cmk, &DummyNetClient{}, true)
	cmk.EXPECT().ValidateZoneAndGetId(ctx, gomock.Any(), gomock.Any()).Times(3).Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil)
	cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), datacenterConfig.Spec.AvailabilityZones[0].Account, testTemplate).Times(3)
	cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).Times(3)
	cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3)
	cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.AvailabilityZones[0].Account, gomock.Any()).Times(3)

	for _, machineConfig := range machineConfigs {
		err := validator.validateMachineConfig(ctx, datacenterConfig, machineConfig)
		if err != nil {
			t.Fatalf("failed to validate CloudStackMachineConfig: %v", err)
		}
	}
}

func TestValidateMachineConfigsWithAffinity(t *testing.T) {
	ctx := context.Background()
	cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t))
	clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename))
	// Default every machine config to the "no" affinity type first, so that the
	// specific control plane and etcd assignments below are not overwritten by
	// the loop.
	for _, machineConfig := range clusterSpec.CloudStackMachineConfigs {
		machineConfig.Spec.Affinity = "no"
		machineConfig.Spec.AffinityGroupIds = []string{}
	}
	controlPlaneMachineConfig(clusterSpec).Spec.Affinity = "pro"
	controlPlaneMachineConfig(clusterSpec).Spec.AffinityGroupIds = []string{}
	etcdMachineConfig(clusterSpec).Spec.Affinity = "anti"
	etcdMachineConfig(clusterSpec).Spec.AffinityGroupIds = []string{}

	validator := NewValidator(cmk, &DummyNetClient{}, true)
	cmk.EXPECT().ValidateZoneAndGetId(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil)
	cmk.EXPECT().ValidateDomainAndGetId(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
	cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
	cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil)
	cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones[0].Account, testTemplate).AnyTimes()
	cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).AnyTimes()
	cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
	cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(),
gomock.Any(), clusterSpec.CloudStackDatacenter.Spec.AvailabilityZones[0].Account, gomock.Any()).AnyTimes() // Valid affinity types err := validator.ValidateClusterMachineConfigs(ctx, clusterSpec) assert.Nil(t, err) } func TestValidateSecretsUnchangedSuccess(t *testing.T) { ctx := context.Background() mockCtrl := gomock.NewController(t) kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) cmk := mocks.NewMockProviderCmkClient(mockCtrl) validator := NewValidator(cmk, &DummyNetClient{}, true) cluster := &types.Cluster{ Name: "test", } kubectl.EXPECT().GetSecretFromNamespace(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedSecret, nil) err := validator.ValidateSecretsUnchanged(ctx, cluster, testExecConfig, kubectl) assert.Nil(t, err) } func TestValidateSecretsUnchangedFailureSecretChanged(t *testing.T) { ctx := context.Background() mockCtrl := gomock.NewController(t) kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) cmk := mocks.NewMockProviderCmkClient(mockCtrl) validator := NewValidator(cmk, &DummyNetClient{}, true) cluster := &types.Cluster{ Name: "test", } modifiedSecret := expectedSecret.DeepCopy() modifiedSecret.Data["api-key"] = []byte("updated-api-key") kubectl.EXPECT().GetSecretFromNamespace(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(modifiedSecret, nil) err := validator.ValidateSecretsUnchanged(ctx, cluster, testExecConfig, kubectl) thenErrorExpected(t, "profile 'global' is different from the secret", err) } func TestValidateSecretsUnchangedFailureGettingSecret(t *testing.T) { ctx := context.Background() mockCtrl := gomock.NewController(t) kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) cmk := mocks.NewMockProviderCmkClient(mockCtrl) validator := NewValidator(cmk, &DummyNetClient{}, true) cluster := &types.Cluster{ Name: "test", } kubectl.EXPECT().GetSecretFromNamespace(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, apierrors.NewBadRequest("test-error")) err := validator.ValidateSecretsUnchanged(ctx, cluster, testExecConfig, kubectl) thenErrorExpected(t, "getting secret for profile global: test-error", err) } func TestValidateSecretsUnchangedFailureSecretNotFound(t *testing.T) { ctx := context.Background() mockCtrl := gomock.NewController(t) kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) cmk := mocks.NewMockProviderCmkClient(mockCtrl) validator := NewValidator(cmk, &DummyNetClient{}, true) cluster := &types.Cluster{ Name: "test", } kubectl.EXPECT().GetSecretFromNamespace(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, notFoundError) err := validator.ValidateSecretsUnchanged(ctx, cluster, testExecConfig, kubectl) assert.Nil(t, err) } var testProfiles = []decoder.CloudStackProfileConfig{ { Name: "global", ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://127.16.0.1:8080/client/api", VerifySsl: "false", }, } var testExecConfig = &decoder.CloudStackExecConfig{ Profiles: testProfiles, }
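For reference, the ValidateSecretsUnchanged tests above diff the in-cluster secret against these profiles. The expectedSecret fixture itself is defined outside this excerpt, so the following is only a sketch of the shape such a secret plausibly takes, reusing the data keys and test values that do appear here; the eksa-system namespace and the variable name are assumptions.

package cloudstack_test

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleExpectedSecret is a hypothetical stand-in for the expectedSecret
// fixture; only the data keys (api-key, secret-key, api-url, verify-ssl)
// are confirmed by the decoder package.
var exampleExpectedSecret = &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "global",      // matches testProfiles[0].Name
		Namespace: "eksa-system", // assumed namespace
	},
	Data: map[string][]byte{
		"api-key":    []byte("test-key1"),
		"secret-key": []byte("test-secret1"),
		"api-url":    []byte("http://127.16.0.1:8080/client/api"),
		"verify-ssl": []byte("false"),
	},
}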
431
eks-anywhere
aws
Go
package cloudstack import ( "context" "time" "github.com/go-logr/logr" "github.com/pkg/errors" cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" capiyaml "github.com/aws/eks-anywhere/pkg/clusterapi/yaml" "github.com/aws/eks-anywhere/pkg/yamlutil" ) type ( // Workers represents the cloudstack specific CAPI spec for worker nodes. Workers = clusterapi.Workers[*cloudstackv1.CloudStackMachineTemplate] workersBuilder = capiyaml.WorkersBuilder[*cloudstackv1.CloudStackMachineTemplate] ) // WorkersSpec generates a cloudstack specific CAPI spec for an eks-a cluster worker nodes. // It talks to the cluster with a client to detect changes in immutable objects and generates new // names for them. func WorkersSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*Workers, error) { templateBuilder := NewTemplateBuilder(time.Now) machineTemplateNames, kubeadmConfigTemplateNames := clusterapi.InitialTemplateNamesForWorkers(spec) workersYaml, err := templateBuilder.GenerateCAPISpecWorkers(spec, machineTemplateNames, kubeadmConfigTemplateNames) if err != nil { return nil, err } parser, builder, err := newWorkersParserAndBuilder(logger) if err != nil { return nil, err } if err = parser.Parse(workersYaml, builder); err != nil { return nil, errors.Wrap(err, "parsing cloudstack CAPI workers yaml") } workers := builder.Workers if err = workers.UpdateImmutableObjectNames(ctx, client, GetMachineTemplate, machineTemplateEqual); err != nil { return nil, errors.Wrap(err, "updating cloudstack worker immutable object names") } return workers, nil } func newWorkersParserAndBuilder(logger logr.Logger) (*yamlutil.Parser, *workersBuilder, error) { parser, builder, err := capiyaml.NewWorkersParserAndBuilder( logger, machineTemplateMapping(), ) if err != nil { return nil, nil, errors.Wrap(err, "building cloudstack workers parser and builder") } return parser, builder, nil } func machineTemplateMapping() yamlutil.Mapping[*cloudstackv1.CloudStackMachineTemplate] { return yamlutil.NewMapping( "CloudStackMachineTemplate", func() *cloudstackv1.CloudStackMachineTemplate { return &cloudstackv1.CloudStackMachineTemplate{} }, ) }
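WorkersSpec is the package's public entry point for worker generation. A minimal caller sketch, assuming the spec and client come from elsewhere; the generateWorkers wrapper itself is hypothetical, but the WorkersSpec call and the Groups field mirror the tests that follow.

package main

import (
	"context"

	"github.com/go-logr/logr"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)

// generateWorkers builds the CAPI worker objects for a cluster spec and
// reports each resulting group.
func generateWorkers(ctx context.Context, log logr.Logger, client kubernetes.Client, spec *cluster.Spec) error {
	workers, err := cloudstack.WorkersSpec(ctx, log, client, spec)
	if err != nil {
		return err
	}
	for _, g := range workers.Groups {
		// Each group pairs a MachineDeployment with its KubeadmConfigTemplate
		// and CloudStackMachineTemplate.
		log.Info("generated worker group", "machineDeployment", g.MachineDeployment.Name)
	}
	return nil
}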
72
eks-anywhere
aws
Go
package cloudstack_test import ( "context" "encoding/json" "os" "testing" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/internal/test" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/controller/clientutil" "github.com/aws/eks-anywhere/pkg/providers/cloudstack" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) func TestWorkersSpec(t *testing.T) { logger := test.NewNullLogger() ctx := context.Background() spec := test.NewFullClusterSpec(t, "testdata/test_worker_spec.yaml") for _, tc := range []struct { Name string Configure func(*cluster.Spec) Exists func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] Expect func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] }{ // Create { Name: "Create", Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, } }, }, { Name: "CreateMultipleWorkerNodeGroups", Configure: func(s *cluster.Spec) { // Re-use the existing worker node group. s.Cluster.Spec.WorkerNodeGroupConfigurations = append( s.Cluster.Spec.WorkerNodeGroupConfigurations, s.Cluster.Spec.WorkerNodeGroupConfigurations[0], ) s.Cluster.Spec.WorkerNodeGroupConfigurations[1].Name = "md-1" }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" }), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-1-1" }), }, } }, }, { Name: "CreateTaints", Configure: func(s *cluster.Spec) { s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Taints = []corev1.Taint{ { Key: "test-taint", Value: "value", Effect: "Effect", }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{ { Key: "test-taint", Value: "value", Effect: "Effect", }, } }), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) {}), }, } }, }, { Name: "CreateDiskOffering", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.DiskOffering = &anywherev1.CloudStackResourceDiskOffering{ CustomSize: 
10, MountPath: "/mnt/sda", Device: "/dev/sda", Filesystem: "ext3", Label: "label", } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { spec := &csmt.Spec.Spec.Spec clientutil.AddAnnotation(csmt, "device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "/dev/sda") clientutil.AddAnnotation(csmt, "filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "ext3") clientutil.AddAnnotation(csmt, "label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "label") clientutil.AddAnnotation(csmt, "mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "/mnt/sda") spec.DiskOffering = cloudstackv1.CloudStackResourceDiskOffering{ CustomSize: 10, MountPath: "/mnt/sda", Device: "/dev/sda", Filesystem: "ext3", Label: "label", } }), }, } }, }, { Name: "CreateSymlinks", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.Symlinks = map[string]string{"foo": "bar"} }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Spec.Template.Spec.PreKubeadmCommands = append( kct.Spec.Template.Spec.PreKubeadmCommands, "if [ ! -L foo ] ;\n then\n mv foo foo-$(tr -dc A-Za-z0-9 \u003c /dev/urandom | head -c 10) ;\n mkdir -p bar \u0026\u0026 ln -s bar foo ;\n else echo \"foo already symlnk\" ;\nfi", ) }), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { clientutil.AddAnnotation(csmt, "symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "foo:bar") }), }, } }, }, { Name: "CreateAffinityGroupIDs", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.AffinityGroupIds = []string{"affinity_group_id"} }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Spec.Spec.Spec.AffinityGroupIDs = []string{"affinity_group_id"} }), }, } }, }, { Name: "CreateUserCustomDetails", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.UserCustomDetails = map[string]string{"qux": "baz"} }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Spec.Spec.Spec.Details = map[string]string{"qux": "baz"} }), }, } }, }, // Upgrade { Name: "UpgradeTaints", Configure: func(s *cluster.Spec) { s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Taints = []corev1.Taint{ { Key: "change-taint", Value: "value", Effect: "Effect", }, } }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { 
KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{ { Key: "test-taint", Value: "value", Effect: "Effect", }, } }), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-0-2" kct.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{ { Key: "change-taint", Value: "value", Effect: "Effect", }, } }), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(), }, } }, }, { Name: "UpgradeComputeOffering", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.ComputeOffering = anywherev1.CloudStackResourceIdentifier{ Name: "m4-medium", } }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-0-1" }), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-1" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.Offering = cloudstackv1.CloudStackResourceIdentifier{ Name: "m4-medium", } }), }, } }, }, { Name: "UpgradeDiskOffering", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.DiskOffering = &anywherev1.CloudStackResourceDiskOffering{ CustomSize: 10, MountPath: "/mnt/sda", Device: "/dev/sda", Filesystem: "ext3", Label: "label", } }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.DiskOffering = cloudstackv1.CloudStackResourceDiskOffering{ CustomSize: 10, MountPath: "/mnt/sda", Device: "/dev/sda", Filesystem: "ext3", Label: "label", } clientutil.AddAnnotation(csmt, "device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "/dev/sda") clientutil.AddAnnotation(csmt, 
"filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "ext3") clientutil.AddAnnotation(csmt, "label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "label") clientutil.AddAnnotation(csmt, "mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "/mnt/sda") }), }, } }, }, { Name: "UpgradeSymlinks", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.Symlinks = map[string]string{"foo": "bar"} }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-0-2" kct.Spec.Template.Spec.PreKubeadmCommands = append( kct.Spec.Template.Spec.PreKubeadmCommands, "if [ ! -L foo ] ;\n then\n mv foo foo-$(tr -dc A-Za-z0-9 \u003c /dev/urandom | head -c 10) ;\n mkdir -p bar \u0026\u0026 ln -s bar foo ;\n else echo \"foo already symlnk\" ;\nfi", ) }), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { clientutil.AddAnnotation(csmt, "symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "foo:bar") }), }, } }, }, { Name: "UpgradeAffinityGroups", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.AffinityGroupIds = []string{"changed"} }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.AffinityGroupIDs = []string{"changed"} }), }, } }, }, { Name: "UpgradeUserCustomDetails", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.UserCustomDetails = map[string]string{"qux": "baz"} }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Spec.Spec.Spec.Details = map[string]string{"foo": "bar"} }), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { 
md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.Details = map[string]string{"qux": "baz"} }), }, } }, }, // Remove { Name: "RemoveDiskOffering", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.DiskOffering = nil }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.DiskOffering = cloudstackv1.CloudStackResourceDiskOffering{ CustomSize: 10, MountPath: "/mnt/sda", Device: "/dev/sda", Filesystem: "ext3", Label: "label", } clientutil.AddAnnotation(csmt, "device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "/dev/sda") clientutil.AddAnnotation(csmt, "filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "ext3") clientutil.AddAnnotation(csmt, "label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "label") clientutil.AddAnnotation(csmt, "mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "/mnt/sda") }), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-3" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-3" }), }, } }, }, { Name: "RemoveSymlinks", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.Symlinks = nil }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-0-2" kct.Spec.Template.Spec.PreKubeadmCommands = append( kct.Spec.Template.Spec.PreKubeadmCommands, "if [ ! 
-L foo ] ;\n then\n mv foo foo-$(tr -dc A-Za-z0-9 \u003c /dev/urandom | head -c 10) ;\n mkdir -p bar \u0026\u0026 ln -s bar foo ;\n else echo \"foo already symlnk\" ;\nfi", ) }), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { clientutil.AddAnnotation(csmt, "symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1", "foo:bar") }), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-0-2" }), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(), }, } }, }, { Name: "RemoveAffinityGroups", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.AffinityGroupIds = nil }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Spec.Spec.Spec.AffinityGroupIDs = []string{"affinity_group_id"} }), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.AffinityGroupIDs = nil }), }, } }, }, { Name: "RemoveUserCustomDetails", Configure: func(s *cluster.Spec) { s.CloudStackMachineConfigs["test"].Spec.UserCustomDetails = nil }, Exists: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Spec.Spec.Spec.Details = map[string]string{"foo": "bar"} }), }, } }, Expect: func() []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate] { return []clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ { KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(func(md *clusterv1.MachineDeployment) { md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" }), ProviderMachineTemplate: machineTemplate(func(csmt *cloudstackv1.CloudStackMachineTemplate) { csmt.Name = "test-md-0-2" csmt.Spec.Spec.Spec.Details = nil }), }, } }, }, } { t.Run(tc.Name, func(t *testing.T) { g := NewWithT(t) // Copy the foundational spec already read from disk so we don't pollute tests. spec := spec.DeepCopy() if tc.Configure != nil { tc.Configure(spec) } // Build a client with all the objects that should already exist in the cluster. 
var objects []kubernetes.Object if tc.Exists != nil { for _, group := range tc.Exists() { objects = append(objects, group.Objects()...) } } client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(objects)...) expect := tc.Expect() workers, err := cloudstack.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) // Optionally dump expect and got. This proved useful in debugging as the Ginkgo output // gets truncated. Compare the files in your IDE. if os.Getenv("T_DUMP") == "true" { expectGroups, _ := json.MarshalIndent(expect, "", "\t") receivedGroups, _ := json.MarshalIndent(workers.Groups, "", "\t") _ = os.WriteFile("groups_expected.json", expectGroups, 0o666) _ = os.WriteFile("groups_received.json", receivedGroups, 0o666) _ = os.WriteFile("groups_expected_received.diff", []byte(cmp.Diff(expectGroups, receivedGroups)), 0o666) } g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(len(expect))) for _, e := range expect { g.Expect(workers.Groups).To(ContainElement(e)) } }) } } func TestWorkersSpecErrorFromClient(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml") client := test.NewFakeKubeClientAlwaysError() _, err := cloudstack.WorkersSpec(ctx, logger, client, spec) g.Expect(err).To(MatchError(ContainSubstring("updating cloudstack worker immutable object names"))) } func TestWorkersSpecMachineTemplateNotFound(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml") client := test.NewFakeKubeClient(machineDeployment()) _, err := cloudstack.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) } func TestWorkersSpecRegistryMirrorConfiguration(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := test.NewFullClusterSpec(t, "testdata/cluster_main_multiple_worker_node_groups.yaml") client := test.NewFakeKubeClient() tests := []struct { name string mirrorConfig *anywherev1.RegistryMirrorConfiguration files []bootstrapv1.File }{ { name: "insecure skip verify", mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(), files: test.RegistryMirrorConfigFilesInsecureSkipVerify(), }, { name: "insecure skip verify with ca cert", mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(), files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig workers, err := cloudstack.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(2)) g.Expect(workers.Groups).To(ConsistOf( clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...) preKubeadmCommands := append([]string{"swapoff -a"}, test.RegistryMirrorSudoPreKubeadmCommands()...) kct.Spec.Template.Spec.PreKubeadmCommands = append(preKubeadmCommands, kct.Spec.Template.Spec.PreKubeadmCommands[1:]...) 
}), MachineDeployment: machineDeployment(), ProviderMachineTemplate: machineTemplate(), }, clusterapi.WorkerGroup[*cloudstackv1.CloudStackMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate( func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...) preKubeadmCommands := append([]string{"swapoff -a"}, test.RegistryMirrorSudoPreKubeadmCommands()...) kct.Spec.Template.Spec.PreKubeadmCommands = append(preKubeadmCommands, kct.Spec.Template.Spec.PreKubeadmCommands[1:]...) }, ), MachineDeployment: machineDeployment( func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" md.Spec.Replicas = ptr.Int32(2) }, ), ProviderMachineTemplate: machineTemplate( func(vmt *cloudstackv1.CloudStackMachineTemplate) { vmt.Name = "test-md-1-1" }, ), }, )) }) } } func machineDeployment(opts ...func(*clusterv1.MachineDeployment)) *clusterv1.MachineDeployment { o := &clusterv1.MachineDeployment{ TypeMeta: metav1.TypeMeta{ Kind: "MachineDeployment", APIVersion: "cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-md-0", Namespace: "eksa-system", Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test"}, }, Spec: clusterv1.MachineDeploymentSpec{ ClusterName: "test", Replicas: ptr.Int32(3), Selector: metav1.LabelSelector{ MatchLabels: map[string]string{}, }, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ Labels: map[string]string{"cluster.x-k8s.io/cluster-name": "test"}, }, Spec: clusterv1.MachineSpec{ ClusterName: "test", Bootstrap: clusterv1.Bootstrap{ ConfigRef: &corev1.ObjectReference{ Kind: "KubeadmConfigTemplate", Name: "test-md-0-1", APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", }, }, InfrastructureRef: corev1.ObjectReference{ Kind: "CloudStackMachineTemplate", Name: "test-md-0-1", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2", }, Version: ptr.String("v1.21.2-eks-1-21-4"), }, }, }, } for _, opt := range opts { opt(o) } return o } func kubeadmConfigTemplate(opts ...func(*bootstrapv1.KubeadmConfigTemplate)) *bootstrapv1.KubeadmConfigTemplate { o := &bootstrapv1.KubeadmConfigTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "KubeadmConfigTemplate", APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-md-0-1", Namespace: "eksa-system", }, Spec: bootstrapv1.KubeadmConfigTemplateSpec{ Template: bootstrapv1.KubeadmConfigTemplateResource{ Spec: bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: &bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ Name: "{{ ds.meta_data.hostname }}", CRISocket: "/var/run/containerd/containerd.sock", Taints: []corev1.Taint{ { Key: "key2", Value: "val2", Effect: "PreferNoSchedule", TimeAdded: nil, }, }, KubeletExtraArgs: map[string]string{ "anonymous-auth": "false", "provider-id": "cloudstack:///'{{ ds.meta_data.instance_id }}'", "read-only-port": "0", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, }, PreKubeadmCommands: []string{ `swapoff -a`, `hostname "{{ ds.meta_data.hostname }}"`, `echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts`, `echo "127.0.0.1 localhost" >>/etc/hosts`, `echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts`, `echo "{{ ds.meta_data.hostname }}" >/etc/hostname`, }, Users: []bootstrapv1.User{ { Name: "mySshUsername", Sudo: ptr.String("ALL=(ALL) NOPASSWD:ALL"), 
SSHAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="}, }, }, Format: bootstrapv1.Format("cloud-config"), }, }, }, } for _, opt := range opts { opt(o) } return o } func machineTemplate(opts ...func(*cloudstackv1.CloudStackMachineTemplate)) *cloudstackv1.CloudStackMachineTemplate { o := &cloudstackv1.CloudStackMachineTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "CloudStackMachineTemplate", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-md-0-1", Namespace: "eksa-system", }, Spec: cloudstackv1.CloudStackMachineTemplateSpec{ Spec: cloudstackv1.CloudStackMachineTemplateResource{ Spec: cloudstackv1.CloudStackMachineSpec{ Details: map[string]string{"foo": "bar"}, Offering: cloudstackv1.CloudStackResourceIdentifier{ Name: "m4-large", }, Template: cloudstackv1.CloudStackResourceIdentifier{ ID: "", Name: "centos7-k8s-118", }, AffinityGroupIDs: []string{"worker-affinity"}, Affinity: "", }, }, }, } for _, opt := range opts { opt(o) } return o }
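The machineDeployment, kubeadmConfigTemplate, and machineTemplate helpers above all use the same functional-options idiom: build one fully populated default object, then let each test case mutate only the fields it cares about. A stripped-down sketch of the idiom, with a hypothetical Widget type standing in for the CAPI objects:

package main

import "fmt"

// Widget is a hypothetical type standing in for the CAPI objects above.
type Widget struct {
	Name     string
	Replicas int
}

// newWidget returns a default Widget and applies any overrides in order,
// mirroring machineTemplate(opts ...func(*cloudstackv1.CloudStackMachineTemplate)).
func newWidget(opts ...func(*Widget)) *Widget {
	w := &Widget{Name: "test-md-0", Replicas: 3}
	for _, opt := range opts {
		opt(w)
	}
	return w
}

func main() {
	fmt.Println(newWidget().Name) // default: "test-md-0"
	// A one-line override for the variant case.
	fmt.Println(newWidget(func(w *Widget) { w.Name = "test-md-1" }).Name)
}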
853
eks-anywhere
aws
Go
package decoder import ( b64 "encoding/base64" "fmt" "os" "strconv" "strings" "gopkg.in/ini.v1" apiv1 "k8s.io/api/core/v1" ) const ( EksacloudStackCloudConfigB64SecretKey = "EKSA_CLOUDSTACK_B64ENCODED_SECRET" CloudStackCloudConfigB64SecretKey = "CLOUDSTACK_B64ENCODED_SECRET" EksaCloudStackHostPathToMount = "EKSA_CLOUDSTACK_HOST_PATHS_TO_MOUNT" defaultVerifySslValue = "true" CloudStackGlobalAZ = "global" APIKeyKey = "api-key" SecretKeyKey = "secret-key" APIUrlKey = "api-url" VerifySslKey = "verify-ssl" ) // ParseCloudStackCredsFromSecrets parses a list of secrets to extract out the api keys, secret keys, and urls. func ParseCloudStackCredsFromSecrets(secrets []apiv1.Secret) (*CloudStackExecConfig, error) { if len(secrets) == 0 { return nil, fmt.Errorf("no secrets provided - unable to generate CloudStackExecConfig") } cloudstackProfiles := make([]CloudStackProfileConfig, 0, len(secrets)) for _, secret := range secrets { apiKey, ok := secret.Data[APIKeyKey] if !ok { return nil, fmt.Errorf("secret %s is missing required key %s", secret.Name, APIKeyKey) } secretKey, ok := secret.Data[SecretKeyKey] if !ok { return nil, fmt.Errorf("secret %s is missing required key %s", secret.Name, SecretKeyKey) } apiURL, ok := secret.Data[APIUrlKey] if !ok { return nil, fmt.Errorf("secret %s is missing required key %s", secret.Name, APIUrlKey) } verifySsl, ok := secret.Data[VerifySslKey] if !ok { verifySsl = []byte(defaultVerifySslValue) } cloudstackProfiles = append( cloudstackProfiles, CloudStackProfileConfig{ Name: secret.Name, ApiKey: string(apiKey), SecretKey: string(secretKey), ManagementUrl: string(apiURL), VerifySsl: string(verifySsl), }, ) } return &CloudStackExecConfig{ Profiles: cloudstackProfiles, }, nil } // ParseCloudStackCredsFromEnv parses the input b64 string into the ini object to extract out the api key, secret key, and url. 
func ParseCloudStackCredsFromEnv() (*CloudStackExecConfig, error) {
	cloudStackB64EncodedSecret, ok := os.LookupEnv(EksacloudStackCloudConfigB64SecretKey)
	if !ok {
		return nil, fmt.Errorf("%s is not set or is empty", EksacloudStackCloudConfigB64SecretKey)
	}
	decodedString, err := b64.StdEncoding.DecodeString(cloudStackB64EncodedSecret)
	if err != nil {
		return nil, fmt.Errorf("decoding value for %s with base64: %v", EksacloudStackCloudConfigB64SecretKey, err)
	}
	cfg, err := ini.Load(decodedString)
	if err != nil {
		return nil, fmt.Errorf("extracting values from %s with ini: %v", EksacloudStackCloudConfigB64SecretKey, err)
	}

	var cloudstackProfiles []CloudStackProfileConfig
	sections := cfg.Sections()
	for _, section := range sections {
		if section.Name() == "DEFAULT" {
			continue
		}
		profile, err := parseCloudStackProfileSection(section)
		if err != nil {
			return nil, err
		}
		cloudstackProfiles = append(cloudstackProfiles, *profile)
	}

	if len(cloudstackProfiles) == 0 {
		return nil, fmt.Errorf("no instance found from %s", EksacloudStackCloudConfigB64SecretKey)
	}

	return &CloudStackExecConfig{
		Profiles: cloudstackProfiles,
	}, nil
}

func parseCloudStackProfileSection(section *ini.Section) (*CloudStackProfileConfig, error) {
	apiKey, err := section.GetKey(APIKeyKey)
	if err != nil {
		return nil, fmt.Errorf("extracting value of 'api-key' from %s: %v", section.Name(), err)
	}
	secretKey, err := section.GetKey(SecretKeyKey)
	if err != nil {
		return nil, fmt.Errorf("extracting value of 'secret-key' from %s: %v", section.Name(), err)
	}
	apiURL, err := section.GetKey(APIUrlKey)
	if err != nil {
		return nil, fmt.Errorf("extracting value of 'api-url' from %s: %v", section.Name(), err)
	}
	verifySslValue := defaultVerifySslValue
	if verifySsl, err := section.GetKey(VerifySslKey); err == nil {
		verifySslValue = verifySsl.Value()
		if _, err := strconv.ParseBool(verifySslValue); err != nil {
			return nil, fmt.Errorf("'verify-ssl' has invalid boolean string %s: %v", verifySslValue, err)
		}
	}
	return &CloudStackProfileConfig{
		Name:          strings.ToLower(section.Name()),
		ApiKey:        apiKey.Value(),
		SecretKey:     secretKey.Value(),
		ManagementUrl: apiURL.Value(),
		VerifySsl:     verifySslValue,
	}, nil
}

// CloudStackExecConfig holds the complete set of CloudStack profiles available for executing cmk commands.
type CloudStackExecConfig struct {
	Profiles []CloudStackProfileConfig
}

// CloudStackProfileConfig contains the credentials and management endpoint for a single CloudStack profile.
type CloudStackProfileConfig struct {
	Name          string
	ApiKey        string
	SecretKey     string
	ManagementUrl string
	VerifySsl     string
	Timeout       string
}
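To make the expected input concrete, here is a sketch of the environment-variable path end to end; the [Global] section name and credential values are illustrative, while the env var and key names come from the constants above.

package main

import (
	"encoding/base64"
	"fmt"
	"os"

	"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)

func main() {
	// One INI section per profile; keys match the decoder constants
	// (api-key, secret-key, api-url, optional verify-ssl).
	rawINI := `[Global]
api-key    = test-key1
secret-key = test-secret1
api-url    = http://127.16.0.1:8080/client/api
verify-ssl = false
`
	os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey,
		base64.StdEncoding.EncodeToString([]byte(rawINI)))

	cfg, err := decoder.ParseCloudStackCredsFromEnv()
	if err != nil {
		panic(err)
	}
	// Section names are lower-cased by the parser, so this prints "global".
	fmt.Println(cfg.Profiles[0].Name, cfg.Profiles[0].ManagementUrl)
}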
146
eks-anywhere
aws
Go
package decoder_test import ( _ "embed" "encoding/base64" "os" "reflect" "testing" . "github.com/onsi/gomega" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" ) type testContext struct { oldCloudStackCloudConfigSecret string isCloudStackCloudConfigSecretSet bool } func (tctx *testContext) backupContext() { tctx.oldCloudStackCloudConfigSecret, tctx.isCloudStackCloudConfigSecretSet = os.LookupEnv(decoder.EksacloudStackCloudConfigB64SecretKey) } func (tctx *testContext) restoreContext() { if tctx.isCloudStackCloudConfigSecretSet { os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, tctx.oldCloudStackCloudConfigSecret) } } func TestCloudStackConfigDecoderFromEnv(t *testing.T) { tests := []struct { name string configFile string wantErr bool wantConfig *decoder.CloudStackExecConfig }{ { name: "Valid config", configFile: "../testdata/cloudstack_config_valid.ini", wantErr: false, wantConfig: &decoder.CloudStackExecConfig{ Profiles: []decoder.CloudStackProfileConfig{ { Name: decoder.CloudStackGlobalAZ, ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://127.16.0.1:8080/client/api", VerifySsl: "false", Timeout: "", }, }, }, }, { name: "Multiple profiles config", configFile: "../testdata/cloudstack_config_multiple_profiles.ini", wantErr: false, wantConfig: &decoder.CloudStackExecConfig{ Profiles: []decoder.CloudStackProfileConfig{ { Name: decoder.CloudStackGlobalAZ, ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://127.16.0.1:8080/client/api", VerifySsl: "false", }, { Name: "instance2", ApiKey: "test-key2", SecretKey: "test-secret2", ManagementUrl: "http://127.16.0.2:8080/client/api", VerifySsl: "true", Timeout: "", }, }, }, }, { name: "Missing apikey", configFile: "../testdata/cloudstack_config_missing_apikey.ini", wantErr: true, }, { name: "Missing secretkey", configFile: "../testdata/cloudstack_config_missing_secretkey.ini", wantErr: true, }, { name: "Missing apiurl", configFile: "../testdata/cloudstack_config_missing_apiurl.ini", wantErr: true, }, { name: "Missing verifyssl", configFile: "../testdata/cloudstack_config_missing_verifyssl.ini", wantErr: false, wantConfig: &decoder.CloudStackExecConfig{ Profiles: []decoder.CloudStackProfileConfig{ { Name: decoder.CloudStackGlobalAZ, ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://127.16.0.1:8080/client/api", VerifySsl: "true", Timeout: "", }, }, }, }, { name: "Invalid INI format", configFile: "../testdata/cloudstack_config_invalid_format.ini", wantErr: true, }, { name: "Invalid veryfyssl value", configFile: "../testdata/cloudstack_config_invalid_verifyssl.ini", wantErr: true, }, { name: "No sections", configFile: "../testdata/cloudstack_config_no_sections.ini", wantErr: true, }, } for _, tc := range tests { t.Run(tc.name, func(tt *testing.T) { g := NewWithT(t) configString := test.ReadFile(t, tc.configFile) encodedConfig := base64.StdEncoding.EncodeToString([]byte(configString)) tt.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, encodedConfig) gotConfig, err := decoder.ParseCloudStackCredsFromEnv() if tc.wantErr { g.Expect(err).NotTo(BeNil()) } else { g.Expect(err).To(BeNil()) if !reflect.DeepEqual(tc.wantConfig, gotConfig) { t.Errorf("%v got = %v, want %v", tc.name, gotConfig, tc.wantConfig) } } }) } } func TestCloudStackConfigDecoderFromSecrets(t *testing.T) { tests := []struct { name string secrets []apiv1.Secret wantErr bool wantConfig 
*decoder.CloudStackExecConfig }{ { name: "Valid config", secrets: []apiv1.Secret{ { ObjectMeta: v1.ObjectMeta{Name: "global"}, Data: map[string][]byte{ decoder.APIKeyKey: []byte("test-key1"), decoder.APIUrlKey: []byte("http://127.16.0.1:8080/client/api"), decoder.SecretKeyKey: []byte("test-secret1"), decoder.VerifySslKey: []byte("false"), }, }, }, wantErr: false, wantConfig: &decoder.CloudStackExecConfig{ Profiles: []decoder.CloudStackProfileConfig{ { Name: decoder.CloudStackGlobalAZ, ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://127.16.0.1:8080/client/api", VerifySsl: "false", Timeout: "", }, }, }, }, { name: "Empty config", secrets: []apiv1.Secret{ { ObjectMeta: v1.ObjectMeta{Name: "global"}, Data: map[string][]byte{}, }, }, wantErr: true, wantConfig: nil, }, { name: "Missing apikey", secrets: []apiv1.Secret{ { ObjectMeta: v1.ObjectMeta{Name: "global"}, Data: map[string][]byte{ decoder.APIUrlKey: []byte("http://127.16.0.1:8080/client/api"), decoder.SecretKeyKey: []byte("test-secret1"), decoder.VerifySslKey: []byte("false"), }, }, }, wantErr: true, wantConfig: nil, }, { name: "Missing api url", secrets: []apiv1.Secret{ { ObjectMeta: v1.ObjectMeta{Name: "global"}, Data: map[string][]byte{ decoder.APIKeyKey: []byte("test-key1"), decoder.SecretKeyKey: []byte("test-secret1"), decoder.VerifySslKey: []byte("false"), }, }, }, wantErr: true, wantConfig: nil, }, { name: "Missing secret key", secrets: []apiv1.Secret{ { ObjectMeta: v1.ObjectMeta{Name: "global"}, Data: map[string][]byte{ decoder.APIKeyKey: []byte("test-key1"), decoder.APIUrlKey: []byte("http://127.16.0.1:8080/client/api"), decoder.VerifySslKey: []byte("false"), }, }, }, wantErr: true, wantConfig: nil, }, { name: "Missing verify ssl", secrets: []apiv1.Secret{ { ObjectMeta: v1.ObjectMeta{Name: "global"}, Data: map[string][]byte{ decoder.APIKeyKey: []byte("test-key1"), decoder.SecretKeyKey: []byte("test-secret1"), decoder.APIUrlKey: []byte("http://127.16.0.1:8080/client/api"), }, }, }, wantErr: false, wantConfig: &decoder.CloudStackExecConfig{ Profiles: []decoder.CloudStackProfileConfig{ { Name: decoder.CloudStackGlobalAZ, ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://127.16.0.1:8080/client/api", VerifySsl: "true", Timeout: "", }, }, }, }, } for _, tc := range tests { t.Run(tc.name, func(tt *testing.T) { g := NewWithT(t) gotConfig, err := decoder.ParseCloudStackCredsFromSecrets(tc.secrets) if tc.wantErr { g.Expect(err).NotTo(BeNil()) } else { g.Expect(err).To(BeNil()) if !reflect.DeepEqual(tc.wantConfig, gotConfig) { t.Errorf("%v got = %v, want %v", tc.name, gotConfig, tc.wantConfig) } } }) } } func TestCloudStackConfigDecoderInvalidEncoding(t *testing.T) { g := NewWithT(t) t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, "xxx") _, err := decoder.ParseCloudStackCredsFromEnv() g.Expect(err).NotTo(BeNil()) } func TestCloudStackConfigDecoderNoEnvVariable(t *testing.T) { var tctx testContext tctx.backupContext() os.Clearenv() g := NewWithT(t) _, err := decoder.ParseCloudStackCredsFromEnv() g.Expect(err).NotTo(BeNil()) tctx.restoreContext() }
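The secrets-based path exercised in the table above works the same way outside tests. A minimal caller sketch using the exported key constants; the secret contents are illustrative.

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)

func main() {
	secret := apiv1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "global"},
		Data: map[string][]byte{
			decoder.APIKeyKey:    []byte("test-key1"),
			decoder.SecretKeyKey: []byte("test-secret1"),
			decoder.APIUrlKey:    []byte("http://127.16.0.1:8080/client/api"),
			// verify-ssl omitted on purpose: the decoder defaults it to "true".
		},
	}
	cfg, err := decoder.ParseCloudStackCredsFromSecrets([]apiv1.Secret{secret})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Profiles[0].VerifySsl) // "true"
}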
303
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: github.com/aws/eks-anywhere/pkg/providers/cloudstack (interfaces: ProviderCmkClient,ProviderKubectlClient) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" executables "github.com/aws/eks-anywhere/pkg/executables" types "github.com/aws/eks-anywhere/pkg/types" v1beta1 "github.com/aws/etcdadm-controller/api/v1beta1" gomock "github.com/golang/mock/gomock" v1 "k8s.io/api/core/v1" v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" v1beta11 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ) // MockProviderCmkClient is a mock of ProviderCmkClient interface. type MockProviderCmkClient struct { ctrl *gomock.Controller recorder *MockProviderCmkClientMockRecorder } // MockProviderCmkClientMockRecorder is the mock recorder for MockProviderCmkClient. type MockProviderCmkClientMockRecorder struct { mock *MockProviderCmkClient } // NewMockProviderCmkClient creates a new mock instance. func NewMockProviderCmkClient(ctrl *gomock.Controller) *MockProviderCmkClient { mock := &MockProviderCmkClient{ctrl: ctrl} mock.recorder = &MockProviderCmkClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockProviderCmkClient) EXPECT() *MockProviderCmkClientMockRecorder { return m.recorder } // GetManagementApiEndpoint mocks base method. func (m *MockProviderCmkClient) GetManagementApiEndpoint(arg0 string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetManagementApiEndpoint", arg0) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // GetManagementApiEndpoint indicates an expected call of GetManagementApiEndpoint. func (mr *MockProviderCmkClientMockRecorder) GetManagementApiEndpoint(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagementApiEndpoint", reflect.TypeOf((*MockProviderCmkClient)(nil).GetManagementApiEndpoint), arg0) } // ValidateAccountPresent mocks base method. func (m *MockProviderCmkClient) ValidateAccountPresent(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateAccountPresent", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // ValidateAccountPresent indicates an expected call of ValidateAccountPresent. func (mr *MockProviderCmkClientMockRecorder) ValidateAccountPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateAccountPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateAccountPresent), arg0, arg1, arg2, arg3) } // ValidateAffinityGroupsPresent mocks base method. func (m *MockProviderCmkClient) ValidateAffinityGroupsPresent(arg0 context.Context, arg1, arg2, arg3 string, arg4 []string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateAffinityGroupsPresent", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // ValidateAffinityGroupsPresent indicates an expected call of ValidateAffinityGroupsPresent. 
func (mr *MockProviderCmkClientMockRecorder) ValidateAffinityGroupsPresent(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateAffinityGroupsPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateAffinityGroupsPresent), arg0, arg1, arg2, arg3, arg4)
}

// ValidateDiskOfferingPresent mocks base method.
func (m *MockProviderCmkClient) ValidateDiskOfferingPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceDiskOffering) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateDiskOfferingPresent", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateDiskOfferingPresent indicates an expected call of ValidateDiskOfferingPresent.
func (mr *MockProviderCmkClientMockRecorder) ValidateDiskOfferingPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDiskOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDiskOfferingPresent), arg0, arg1, arg2, arg3)
}

// ValidateDomainAndGetId mocks base method.
func (m *MockProviderCmkClient) ValidateDomainAndGetId(arg0 context.Context, arg1, arg2 string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateDomainAndGetId", arg0, arg1, arg2)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateDomainAndGetId indicates an expected call of ValidateDomainAndGetId.
func (mr *MockProviderCmkClientMockRecorder) ValidateDomainAndGetId(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDomainAndGetId", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDomainAndGetId), arg0, arg1, arg2)
}

// ValidateNetworkPresent mocks base method.
func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceIdentifier, arg4, arg5 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateNetworkPresent", arg0, arg1, arg2, arg3, arg4, arg5)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateNetworkPresent indicates an expected call of ValidateNetworkPresent.
func (mr *MockProviderCmkClientMockRecorder) ValidateNetworkPresent(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetworkPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateNetworkPresent), arg0, arg1, arg2, arg3, arg4, arg5)
}

// ValidateServiceOfferingPresent mocks base method.
func (m *MockProviderCmkClient) ValidateServiceOfferingPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceIdentifier) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateServiceOfferingPresent", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateServiceOfferingPresent indicates an expected call of ValidateServiceOfferingPresent.
func (mr *MockProviderCmkClientMockRecorder) ValidateServiceOfferingPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateServiceOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateServiceOfferingPresent), arg0, arg1, arg2, arg3)
}

// ValidateTemplatePresent mocks base method.
func (m *MockProviderCmkClient) ValidateTemplatePresent(arg0 context.Context, arg1, arg2, arg3, arg4 string, arg5 v1alpha1.CloudStackResourceIdentifier) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateTemplatePresent", arg0, arg1, arg2, arg3, arg4, arg5)
	ret0, _ := ret[0].(error)
	return ret0
}

// ValidateTemplatePresent indicates an expected call of ValidateTemplatePresent.
func (mr *MockProviderCmkClientMockRecorder) ValidateTemplatePresent(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTemplatePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateTemplatePresent), arg0, arg1, arg2, arg3, arg4, arg5)
}

// ValidateZoneAndGetId mocks base method.
func (m *MockProviderCmkClient) ValidateZoneAndGetId(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackZone) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateZoneAndGetId", arg0, arg1, arg2)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateZoneAndGetId indicates an expected call of ValidateZoneAndGetId.
func (mr *MockProviderCmkClientMockRecorder) ValidateZoneAndGetId(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZoneAndGetId", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZoneAndGetId), arg0, arg1, arg2)
}

// MockProviderKubectlClient is a mock of ProviderKubectlClient interface.
type MockProviderKubectlClient struct {
	ctrl     *gomock.Controller
	recorder *MockProviderKubectlClientMockRecorder
}

// MockProviderKubectlClientMockRecorder is the mock recorder for MockProviderKubectlClient.
type MockProviderKubectlClientMockRecorder struct {
	mock *MockProviderKubectlClient
}

// NewMockProviderKubectlClient creates a new mock instance.
func NewMockProviderKubectlClient(ctrl *gomock.Controller) *MockProviderKubectlClient {
	mock := &MockProviderKubectlClient{ctrl: ctrl}
	mock.recorder = &MockProviderKubectlClientMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProviderKubectlClient) EXPECT() *MockProviderKubectlClientMockRecorder {
	return m.recorder
}

// ApplyKubeSpecFromBytes mocks base method.
func (m *MockProviderKubectlClient) ApplyKubeSpecFromBytes(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockProviderKubectlClientMockRecorder) ApplyKubeSpecFromBytes(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockProviderKubectlClient)(nil).ApplyKubeSpecFromBytes), arg0, arg1, arg2)
}

// CreateNamespaceIfNotPresent mocks base method.
func (m *MockProviderKubectlClient) CreateNamespaceIfNotPresent(arg0 context.Context, arg1, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateNamespaceIfNotPresent", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// CreateNamespaceIfNotPresent indicates an expected call of CreateNamespaceIfNotPresent.
func (mr *MockProviderKubectlClientMockRecorder) CreateNamespaceIfNotPresent(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespaceIfNotPresent", reflect.TypeOf((*MockProviderKubectlClient)(nil).CreateNamespaceIfNotPresent), arg0, arg1, arg2)
}

// DeleteEksaCloudStackDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) DeleteEksaCloudStackDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteEksaCloudStackDatacenterConfig", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteEksaCloudStackDatacenterConfig indicates an expected call of DeleteEksaCloudStackDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) DeleteEksaCloudStackDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksaCloudStackDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteEksaCloudStackDatacenterConfig), arg0, arg1, arg2, arg3)
}

// DeleteEksaCloudStackMachineConfig mocks base method.
func (m *MockProviderKubectlClient) DeleteEksaCloudStackMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteEksaCloudStackMachineConfig", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteEksaCloudStackMachineConfig indicates an expected call of DeleteEksaCloudStackMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) DeleteEksaCloudStackMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEksaCloudStackMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).DeleteEksaCloudStackMachineConfig), arg0, arg1, arg2, arg3)
}

// GetEksaCloudStackDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) GetEksaCloudStackDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.CloudStackDatacenterConfig, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetEksaCloudStackDatacenterConfig", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*v1alpha1.CloudStackDatacenterConfig)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetEksaCloudStackDatacenterConfig indicates an expected call of GetEksaCloudStackDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaCloudStackDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCloudStackDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaCloudStackDatacenterConfig), arg0, arg1, arg2, arg3)
}

// GetEksaCloudStackMachineConfig mocks base method.
func (m *MockProviderKubectlClient) GetEksaCloudStackMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.CloudStackMachineConfig, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetEksaCloudStackMachineConfig", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*v1alpha1.CloudStackMachineConfig)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetEksaCloudStackMachineConfig indicates an expected call of GetEksaCloudStackMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaCloudStackMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCloudStackMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaCloudStackMachineConfig), arg0, arg1, arg2, arg3)
}

// GetEksaCluster mocks base method.
func (m *MockProviderKubectlClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2)
	ret0, _ := ret[0].(*v1alpha1.Cluster)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetEksaCluster indicates an expected call of GetEksaCluster.
func (mr *MockProviderKubectlClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaCluster), arg0, arg1, arg2)
}

// GetEtcdadmCluster mocks base method.
func (m *MockProviderKubectlClient) GetEtcdadmCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta1.EtcdadmCluster, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1, arg2}
	for _, a := range arg3 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetEtcdadmCluster", varargs...)
	ret0, _ := ret[0].(*v1beta1.EtcdadmCluster)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetEtcdadmCluster indicates an expected call of GetEtcdadmCluster.
func (mr *MockProviderKubectlClientMockRecorder) GetEtcdadmCluster(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEtcdadmCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEtcdadmCluster), varargs...)
}

// GetKubeadmControlPlane mocks base method.
func (m *MockProviderKubectlClient) GetKubeadmControlPlane(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta11.KubeadmControlPlane, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1, arg2}
	for _, a := range arg3 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetKubeadmControlPlane", varargs...)
	ret0, _ := ret[0].(*v1beta11.KubeadmControlPlane)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetKubeadmControlPlane indicates an expected call of GetKubeadmControlPlane.
func (mr *MockProviderKubectlClientMockRecorder) GetKubeadmControlPlane(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeadmControlPlane", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetKubeadmControlPlane), varargs...)
}

// GetMachineDeployment mocks base method.
func (m *MockProviderKubectlClient) GetMachineDeployment(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1beta10.MachineDeployment, error) {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1}
	for _, a := range arg2 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "GetMachineDeployment", varargs...)
	ret0, _ := ret[0].(*v1beta10.MachineDeployment)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetMachineDeployment indicates an expected call of GetMachineDeployment.
func (mr *MockProviderKubectlClientMockRecorder) GetMachineDeployment(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1}, arg2...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeployment", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetMachineDeployment), varargs...)
}

// GetSecretFromNamespace mocks base method.
func (m *MockProviderKubectlClient) GetSecretFromNamespace(arg0 context.Context, arg1, arg2, arg3 string) (*v1.Secret, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetSecretFromNamespace", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(*v1.Secret)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetSecretFromNamespace indicates an expected call of GetSecretFromNamespace.
func (mr *MockProviderKubectlClientMockRecorder) GetSecretFromNamespace(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretFromNamespace", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetSecretFromNamespace), arg0, arg1, arg2, arg3)
}

// LoadSecret mocks base method.
func (m *MockProviderKubectlClient) LoadSecret(arg0 context.Context, arg1, arg2, arg3, arg4 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LoadSecret", arg0, arg1, arg2, arg3, arg4)
	ret0, _ := ret[0].(error)
	return ret0
}

// LoadSecret indicates an expected call of LoadSecret.
func (mr *MockProviderKubectlClientMockRecorder) LoadSecret(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadSecret", reflect.TypeOf((*MockProviderKubectlClient)(nil).LoadSecret), arg0, arg1, arg2, arg3, arg4)
}

// SearchCloudStackDatacenterConfig mocks base method.
func (m *MockProviderKubectlClient) SearchCloudStackDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) ([]*v1alpha1.CloudStackDatacenterConfig, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SearchCloudStackDatacenterConfig", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].([]*v1alpha1.CloudStackDatacenterConfig)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SearchCloudStackDatacenterConfig indicates an expected call of SearchCloudStackDatacenterConfig.
func (mr *MockProviderKubectlClientMockRecorder) SearchCloudStackDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchCloudStackDatacenterConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).SearchCloudStackDatacenterConfig), arg0, arg1, arg2, arg3)
}

// SearchCloudStackMachineConfig mocks base method.
func (m *MockProviderKubectlClient) SearchCloudStackMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) ([]*v1alpha1.CloudStackMachineConfig, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SearchCloudStackMachineConfig", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].([]*v1alpha1.CloudStackMachineConfig)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SearchCloudStackMachineConfig indicates an expected call of SearchCloudStackMachineConfig.
func (mr *MockProviderKubectlClientMockRecorder) SearchCloudStackMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchCloudStackMachineConfig", reflect.TypeOf((*MockProviderKubectlClient)(nil).SearchCloudStackMachineConfig), arg0, arg1, arg2, arg3)
}

// SetEksaControllerEnvVar mocks base method.
func (m *MockProviderKubectlClient) SetEksaControllerEnvVar(arg0 context.Context, arg1, arg2, arg3 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SetEksaControllerEnvVar", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(error)
	return ret0
}

// SetEksaControllerEnvVar indicates an expected call of SetEksaControllerEnvVar.
func (mr *MockProviderKubectlClientMockRecorder) SetEksaControllerEnvVar(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEksaControllerEnvVar", reflect.TypeOf((*MockProviderKubectlClient)(nil).SetEksaControllerEnvVar), arg0, arg1, arg2, arg3)
}

// UpdateAnnotation mocks base method.
func (m *MockProviderKubectlClient) UpdateAnnotation(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 ...executables.KubectlOpt) error {
	m.ctrl.T.Helper()
	varargs := []interface{}{arg0, arg1, arg2, arg3}
	for _, a := range arg4 {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "UpdateAnnotation", varargs...)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateAnnotation indicates an expected call of UpdateAnnotation.
func (mr *MockProviderKubectlClientMockRecorder) UpdateAnnotation(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotation", reflect.TypeOf((*MockProviderKubectlClient)(nil).UpdateAnnotation), varargs...)
}
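A brief usage sketch, not part of the generated file above: this is how a test might typically program one of these mocks. The concrete argument values (profile, domain, zone, account, template name) are illustrative assumptions, since the generated signatures only name the parameters arg0..arg5; it assumes a testing import alongside the file's existing imports.

// Hypothetical usage sketch for the generated mock; argument values are assumptions.
func TestValidateTemplatePresentWithMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cmk := NewMockProviderCmkClient(ctrl)
	template := v1alpha1.CloudStackResourceIdentifier{Name: "rhel-8"}

	// Expect exactly one validation call and report success.
	cmk.EXPECT().
		ValidateTemplatePresent(gomock.Any(), "global", "domain-id", "zone-id", "admin", template).
		Return(nil)

	if err := cmk.ValidateTemplatePresent(context.Background(), "global", "domain-id", "zone-id", "admin", template); err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
}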
448
eks-anywhere
aws
Go
package reconciler_test

import (
	"os"
	"testing"

	"github.com/aws/eks-anywhere/internal/test/envtest"
)

var env *envtest.Environment

func TestMain(m *testing.M) {
	os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
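A hedged sketch of how tests in this package rely on TestMain: the shared environment exposes a client (env.Client() is used this way by the reconciler tests later in this document); the test body itself is illustrative.

// Illustrative only: retrieve a client for the shared envtest environment.
func TestSharedEnvironmentClientSketch(t *testing.T) {
	if c := env.Client(); c == nil {
		t.Fatal("expected the shared test environment to provide a client")
	}
}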
15
eks-anywhere
aws
Go
package reconciler

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	c "github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/controller"
	"github.com/aws/eks-anywhere/pkg/controller/clientutil"
	"github.com/aws/eks-anywhere/pkg/controller/clusters"
	"github.com/aws/eks-anywhere/pkg/providers/cloudstack"
)

// IPValidator is an interface that defines methods to validate the control plane IP.
type IPValidator interface {
	ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error)
}

// CNIReconciler is an interface for reconciling CNI in the CloudStack cluster reconciler.
type CNIReconciler interface {
	Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *c.Spec) (controller.Result, error)
}

// RemoteClientRegistry is an interface that defines methods for remote clients.
type RemoteClientRegistry interface {
	GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error)
}

// Reconciler for CloudStack.
type Reconciler struct {
	client               client.Client
	ipValidator          IPValidator
	cniReconciler        CNIReconciler
	remoteClientRegistry RemoteClientRegistry
	validatorRegistry    cloudstack.ValidatorRegistry
}

// New defines a new CloudStack reconciler.
func New(client client.Client, ipValidator IPValidator, cniReconciler CNIReconciler, remoteClientRegistry RemoteClientRegistry, validatorRegistry cloudstack.ValidatorRegistry) *Reconciler {
	return &Reconciler{
		client:               client,
		ipValidator:          ipValidator,
		cniReconciler:        cniReconciler,
		remoteClientRegistry: remoteClientRegistry,
		validatorRegistry:    validatorRegistry,
	}
}

// Reconcile reconciles cluster to desired state.
func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "cloudstack")
	clusterSpec, err := c.BuildSpec(ctx, clientutil.NewKubeClient(r.client), cluster)
	if err != nil {
		return controller.Result{}, err
	}

	return controller.NewPhaseRunner[*c.Spec]().Register(
		r.ipValidator.ValidateControlPlaneIP,
		r.ValidateDatacenterConfig,
		r.ValidateMachineConfig,
		clusters.CleanupStatusAfterValidate,
		r.ReconcileControlPlane,
		r.CheckControlPlaneReady,
		r.ReconcileCNI,
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// ValidateDatacenterConfig updates the cluster status if the CloudStackDatacenter status indicates that the spec is invalid.
func (r *Reconciler) ValidateDatacenterConfig(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "validateDatacenterConfig")
	log.Info("Validating datacenter config")
	dataCenterConfig := spec.CloudStackDatacenter

	if dataCenterConfig.Status.SpecValid {
		return controller.Result{}, nil
	}

	if dataCenterConfig.Status.FailureMessage != nil {
		failureMessage := fmt.Sprintf("Invalid %s CloudStackDatacenterConfig: %s", dataCenterConfig.Name, *dataCenterConfig.Status.FailureMessage)
		spec.Cluster.Status.FailureMessage = &failureMessage
		log.Error(errors.New(*dataCenterConfig.Status.FailureMessage), "Invalid CloudStackDatacenterConfig", "datacenterConfig", klog.KObj(dataCenterConfig))
	} else {
		// logr requires alternating key/value arguments, so the object reference needs a key.
		log.Info("CloudStackDatacenterConfig hasn't been validated yet", "datacenterConfig", klog.KObj(dataCenterConfig))
	}

	return controller.ResultWithReturn(), nil
}

// ValidateMachineConfig performs additional, context-aware validations on the machine configs.
func (r *Reconciler) ValidateMachineConfig(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "validateMachineConfigs")
	log.Info("Validating machine config")
	datacenterConfig := spec.CloudStackDatacenter
	execConfig, err := cloudstack.GetCloudstackExecConfig(ctx, r.client, datacenterConfig)
	if err != nil {
		return controller.Result{}, err
	}
	validator, err := r.validatorRegistry.Get(execConfig)
	if err != nil {
		return controller.Result{}, err
	}
	if err = validator.ValidateClusterMachineConfigs(ctx, spec); err != nil {
		log.Error(err, "Invalid CloudStackMachineConfig")
		failureMessage := err.Error()
		spec.Cluster.Status.FailureMessage = &failureMessage
		return controller.ResultWithReturn(), nil
	}

	return controller.Result{}, nil
}

// ReconcileControlPlane applies the control plane CAPI objects to the cluster.
func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileControlPlane")
	log.Info("Applying control plane CAPI objects")
	cp, err := cloudstack.ControlPlaneSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
	if err != nil {
		return controller.Result{}, err
	}

	return clusters.ReconcileControlPlane(ctx, r.client, &clusters.ControlPlane{
		Cluster:                     cp.Cluster,
		ProviderCluster:             cp.ProviderCluster,
		KubeadmControlPlane:         cp.KubeadmControlPlane,
		ControlPlaneMachineTemplate: cp.ControlPlaneMachineTemplate,
		EtcdCluster:                 cp.EtcdCluster,
		EtcdMachineTemplate:         cp.EtcdMachineTemplate,
	})
}

// CheckControlPlaneReady checks whether the control plane for an eks-a cluster is ready or not.
// Requeues with the appropriate wait times whenever the control plane is not ready yet.
func (r *Reconciler) CheckControlPlaneReady(ctx context.Context, log logr.Logger, spec *c.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "checkControlPlaneReady")
	return clusters.CheckControlPlaneReady(ctx, r.client, log, spec.Cluster)
}

// ReconcileWorkerNodes validates the cluster definition and reconciles the worker nodes
// to the desired state.
func (r *Reconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "cloudstack", "reconcile type", "workers")
	clusterSpec, err := c.BuildSpec(ctx, clientutil.NewKubeClient(r.client), cluster)
	if err != nil {
		return controller.Result{}, errors.Wrap(err, "building cluster Spec for worker node reconcile")
	}

	return controller.NewPhaseRunner[*c.Spec]().Register(
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// ReconcileWorkers applies the worker CAPI objects to the cluster.
func (r *Reconciler) ReconcileWorkers(ctx context.Context, log logr.Logger, clusterSpec *c.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileWorkers")
	log.Info("Applying worker CAPI objects")
	w, err := cloudstack.WorkersSpec(ctx, log, clientutil.NewKubeClient(r.client), clusterSpec)
	if err != nil {
		return controller.Result{}, errors.Wrap(err, "Generate worker node CAPI spec")
	}

	return clusters.ReconcileWorkersForEKSA(ctx, log, r.client, clusterSpec.Cluster, clusters.ToWorkers(w))
}

// ReconcileCNI reconciles the CNI to the desired state.
func (r *Reconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *c.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileCNI")
	client, err := r.remoteClientRegistry.GetClient(ctx, controller.CapiClusterObjectKey(clusterSpec.Cluster))
	if err != nil {
		return controller.Result{}, err
	}

	return r.cniReconciler.Reconcile(ctx, log, client, clusterSpec)
}
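A hedged wiring sketch from a hypothetical consumer package (not in the source): how the reconciler is constructed and driven; every dependency and variable name is supplied by the caller and illustrative.

// reconcileOnce is a hypothetical helper showing construction and a single pass.
func reconcileOnce(
	ctx context.Context,
	log logr.Logger,
	mgrClient client.Client,
	ipValidator reconciler.IPValidator,
	cni reconciler.CNIReconciler,
	remotes reconciler.RemoteClientRegistry,
	validators cloudstack.ValidatorRegistry,
	eksaCluster *anywherev1.Cluster,
) (controller.Result, error) {
	r := reconciler.New(mgrClient, ipValidator, cni, remotes, validators)
	// Runs the registered phases in order: IP validation, datacenter and
	// machine config validation, control plane, CNI, then workers.
	return r.Reconcile(ctx, log, eksaCluster)
}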
180
eks-anywhere
aws
Go
package reconciler_test import ( "context" "math" "testing" "time" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cloudstackv1 "sigs.k8s.io/cluster-api-provider-cloudstack/api/v1beta2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" clusterspec "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller" "github.com/aws/eks-anywhere/pkg/controller/clientutil" "github.com/aws/eks-anywhere/pkg/providers/cloudstack" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/reconciler" cloudstackreconcilermocks "github.com/aws/eks-anywhere/pkg/providers/cloudstack/reconciler/mocks" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) const ( clusterNamespace = "test-namespace" ) func TestReconcilerReconcileSuccess(t *testing.T) { tt := newReconcilerTest(t) // We want to check that the cluster status is cleaned up if validations are passed tt.cluster.Status.FailureMessage = ptr.String("invalid cluster") capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() remoteClient := env.Client() spec := tt.buildSpec() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, spec).Return(controller.Result{}, nil) tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: constants.EksaSystemNamespace}, ).Return(remoteClient, nil).Times(1) tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec()) ctrl := gomock.NewController(t) validator := cloudstack.NewMockProviderValidator(ctrl) tt.validatorRegistry.EXPECT().Get(tt.execConfig).Return(validator, nil).Times(1) validator.EXPECT().ValidateClusterMachineConfigs(tt.ctx, spec).Return(nil).Times(1) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil()) } func TestReconcilerValidateDatacenterConfigRequeue(t *testing.T) { tt := newReconcilerTest(t) tt.datacenterConfig.Status.SpecValid = false capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) 
tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(result).To(Equal(controller.ResultWithReturn())) tt.Expect(tt.datacenterConfig.Status.FailureMessage).To(BeNil()) } func TestReconcilerValidateDatacenterConfigFail(t *testing.T) { tt := newReconcilerTest(t) tt.datacenterConfig.Status.SpecValid = false tt.datacenterConfig.Status.FailureMessage = ptr.String("Invalid CloudStackDatacenterConfig") capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil) _, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).To(BeNil()) tt.Expect(&tt.datacenterConfig.Status.FailureMessage).To(HaveValue(Equal("Invalid CloudStackDatacenterConfig"))) } func TestReconcilerValidateMachineConfigInvalidSecret(t *testing.T) { tt := newReconcilerTest(t) tt.createAllObjs() spec := tt.buildSpec() logger := test.NewNullLogger() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, spec).Return(controller.Result{}, nil) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).To(MatchError(ContainSubstring("Secret \"global\" not found"))) tt.Expect(result).To(Equal(controller.Result{})) tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil()) } func TestReconcilerValidateMachineConfigGetValidatorFail(t *testing.T) { tt := newReconcilerTest(t) tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) tt.createAllObjs() spec := tt.buildSpec() logger := test.NewNullLogger() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, spec).Return(controller.Result{}, nil) errMsg := "building cmk executable: nil exec config for CloudMonkey, unable to proceed" tt.validatorRegistry.EXPECT().Get(tt.execConfig).Return(nil, errors.New(errMsg)).Times(1) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).To(MatchError(ContainSubstring(errMsg))) tt.Expect(result).To(Equal(controller.Result{})) tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil()) } func TestReconcilerValidateMachineConfigFail(t *testing.T) { tt := newReconcilerTest(t) tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) tt.createAllObjs() spec := tt.buildSpec() logger := test.NewNullLogger() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, spec).Return(controller.Result{}, nil) ctrl := gomock.NewController(t) validator := cloudstack.NewMockProviderValidator(ctrl) tt.validatorRegistry.EXPECT().Get(tt.execConfig).Return(validator, nil).Times(1) errMsg := "Invalid CloudStackMachineConfig: validating service offering" validator.EXPECT().ValidateClusterMachineConfigs(tt.ctx, spec).Return(errors.New(errMsg)).Times(1) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).To(BeNil()) tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation") tt.Expect(tt.cluster.Status.FailureMessage).To(HaveValue(ContainSubstring(errMsg))) } func TestReconcilerControlPlaneIsNotReady(t *testing.T) { tt := newReconcilerTest(t) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) capiCluster.Status.Conditions = clusterv1.Conditions{ { Type: clusterapi.ControlPlaneReadyCondition, Status: corev1.ConditionFalse, LastTransitionTime: metav1.NewTime(time.Now()), }, } tt.eksaSupportObjs = 
append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() spec := tt.buildSpec() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, spec).Return(controller.Result{}, nil) ctrl := gomock.NewController(t) validator := cloudstack.NewMockProviderValidator(ctrl) tt.validatorRegistry.EXPECT().Get(tt.execConfig).Return(validator, nil).Times(1) validator.EXPECT().ValidateClusterMachineConfigs(tt.ctx, spec).Return(nil).Times(1) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.ResultWithRequeue(30 * time.Second))) } func TestReconcileControlPlaneUnstackedEtcdSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Spec.ExternalEtcdConfiguration = &anywherev1.ExternalEtcdConfiguration{ Count: 1, MachineGroupRef: &anywherev1.Ref{ Kind: anywherev1.CloudStackMachineConfigKind, Name: tt.machineConfigControlPlane.Name, }, } tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.ShouldEventuallyExist(tt.ctx, capiCluster) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-control-plane-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcileControlPlaneStackedEtcdSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.ShouldEventuallyExist(tt.ctx, capiCluster) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-control-plane-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyNotExist(tt.ctx, 
&cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyNotExist(tt.ctx, &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcilerReconcileControlPlaneFailure(t *testing.T) { tt := newReconcilerTest(t) tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) tt.createAllObjs() spec := tt.buildSpec() spec.Cluster.Spec.KubernetesVersion = "" _, err := tt.reconciler().ReconcileControlPlane(tt.ctx, test.NewNullLogger(), spec) tt.Expect(err).To(MatchError(ContainSubstring("generating cloudstack control plane yaml spec"))) } func TestReconcileCNISuccess(t *testing.T) { tt := newReconcilerTest(t) tt.withFakeClient() logger := test.NewNullLogger() remoteClient := fake.NewClientBuilder().Build() tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) spec := tt.buildSpec() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"}, ).Return(remoteClient, nil) tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec) result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcileCNIErrorClientRegistry(t *testing.T) { tt := newReconcilerTest(t) tt.withFakeClient() logger := test.NewNullLogger() tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.secret) spec := tt.buildSpec() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"}, ).Return(nil, errors.New("building client")) result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec) tt.Expect(err).To(MatchError(ContainSubstring("building client"))) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcilerReconcileWorkersSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "mgmt-cluster" capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileWorkers(tt.ctx, logger, tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcilerReconcileWorkersFailure(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "mgmt-cluster" tt.cluster.SetSelfManaged() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() clusterSpec := tt.buildSpec() 
clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(int(math.Inf(1))) logger := test.NewNullLogger() _, err := tt.reconciler().ReconcileWorkers(tt.ctx, logger, clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("Generate worker node CAPI spec"))) } func TestReconcilerReconcileWorkerNodesSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "mgmt-cluster" tt.cluster.SetSelfManaged() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: capiCluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: capiCluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: capiCluster.Name + "-md-0", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcilerReconcileWorkerNodesFailure(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "mgmt-cluster" tt.cluster.SetSelfManaged() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.cluster.Spec.KubernetesVersion = "" tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret) tt.createAllObjs() logger := test.NewNullLogger() _, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster) tt.Expect(err).To(MatchError(ContainSubstring("building cluster Spec for worker node reconcile"))) } func (tt *reconcilerTest) withFakeClient() { tt.client = fake.NewClientBuilder().WithObjects(clientutil.ObjectsToClientObjects(tt.allObjs())...).Build() } func (tt *reconcilerTest) createAllObjs() { tt.t.Helper() envtest.CreateObjs(tt.ctx, tt.t, tt.client, tt.allObjs()...) } func (tt *reconcilerTest) allObjs() []client.Object { objs := make([]client.Object, 0, len(tt.eksaSupportObjs)+3) objs = append(objs, tt.eksaSupportObjs...) 
objs = append(objs, tt.cluster, tt.machineConfigControlPlane, tt.machineConfigWorker) return objs } func (tt *reconcilerTest) reconciler() *reconciler.Reconciler { return reconciler.New(tt.client, tt.ipValidator, tt.cniReconciler, tt.remoteClientRegistry, tt.validatorRegistry) } func (tt *reconcilerTest) buildSpec() *clusterspec.Spec { tt.t.Helper() spec, err := clusterspec.BuildSpec(tt.ctx, clientutil.NewKubeClient(tt.client), tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) return spec } type reconcilerTest struct { t testing.TB *WithT *envtest.APIExpecter ctx context.Context cluster *anywherev1.Cluster client client.Client eksaSupportObjs []client.Object datacenterConfig *anywherev1.CloudStackDatacenterConfig machineConfigControlPlane *anywherev1.CloudStackMachineConfig machineConfigWorker *anywherev1.CloudStackMachineConfig ipValidator *cloudstackreconcilermocks.MockIPValidator cniReconciler *cloudstackreconcilermocks.MockCNIReconciler remoteClientRegistry *cloudstackreconcilermocks.MockRemoteClientRegistry validatorRegistry *cloudstack.MockValidatorRegistry execConfig *decoder.CloudStackExecConfig secret *corev1.Secret } func newReconcilerTest(t testing.TB) *reconcilerTest { ctrl := gomock.NewController(t) c := env.Client() ipValidator := cloudstackreconcilermocks.NewMockIPValidator(ctrl) cniReconciler := cloudstackreconcilermocks.NewMockCNIReconciler(ctrl) remoteClientRegistry := cloudstackreconcilermocks.NewMockRemoteClientRegistry(ctrl) validatorRegistry := cloudstack.NewMockValidatorRegistry(ctrl) execConfig := &decoder.CloudStackExecConfig{ Profiles: []decoder.CloudStackProfileConfig{ { Name: "global", ApiKey: "test-key1", SecretKey: "test-secret1", ManagementUrl: "http://1.1.1.1:8080/client/api", }, }, } bundle := test.Bundle() managementCluster := cloudstackCluster(func(c *anywherev1.Cluster) { c.Name = "management-cluster" c.Spec.ManagementCluster = anywherev1.ManagementCluster{ Name: c.Name, } c.Spec.BundlesRef = &anywherev1.BundlesRef{ Name: bundle.Name, Namespace: bundle.Namespace, APIVersion: bundle.APIVersion, } }) machineConfigCP := machineConfig(func(m *anywherev1.CloudStackMachineConfig) { m.Name = "cp-machine-config" m.Spec.Users = append(m.Spec.Users, anywherev1.UserConfiguration{ Name: "user", SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8ZEibIrz1AUBKDvmDiWLs9f5DnOerC4qPITiDtSOuPAsxgZbRMavBfVTxodMdAkYRYlXxK6PqNo0ve0qcOV2yvpxH1OogasMMetck6BlM/dIoo3vEY4ZoG9DuVRIf9Iry5gJKbpMDYWpx1IGZrDMOFcIM20ii2qLQQk5hfq9OqdqhToEJFixdgJt/y/zt6Koy3kix+XsnrVdAHgWAq4CZuwt1G6JUAqrpob3H8vPmL7aS+35ktf0pHBm6nYoxRhslnWMUb/7vpzWiq+fUBIm2LYqvrnm7t3fRqFx7p2sZqAm2jDNivyYXwRXkoQPR96zvGeMtuQ5BVGPpsDfVudSW21+pEXHI0GINtTbua7Ogz7wtpVywSvHraRgdFOeY9mkXPzvm2IhoqNrteck2GErwqSqb19mPz6LnHueK0u7i6WuQWJn0CUoCtyMGIrowXSviK8qgHXKrmfTWATmCkbtosnLskNdYuOw8bKxq5S4WgdQVhPps2TiMSZndjX5NTr8= ubuntu@ip-10-2-0-6"}, }) }) machineConfigWN := machineConfig(func(m *anywherev1.CloudStackMachineConfig) { m.Name = "worker-machine-config" m.Spec.Users = append(m.Spec.Users, anywherev1.UserConfiguration{ Name: "user", SshAuthorizedKeys: []string{"ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQC8ZEibIrz1AUBKDvmDiWLs9f5DnOerC4qPITiDtSOuPAsxgZbRMavBfVTxodMdAkYRYlXxK6PqNo0ve0qcOV2yvpxH1OogasMMetck6BlM/dIoo3vEY4ZoG9DuVRIf9Iry5gJKbpMDYWpx1IGZrDMOFcIM20ii2qLQQk5hfq9OqdqhToEJFixdgJt/y/zt6Koy3kix+XsnrVdAHgWAq4CZuwt1G6JUAqrpob3H8vPmL7aS+35ktf0pHBm6nYoxRhslnWMUb/7vpzWiq+fUBIm2LYqvrnm7t3fRqFx7p2sZqAm2jDNivyYXwRXkoQPR96zvGeMtuQ5BVGPpsDfVudSW21+pEXHI0GINtTbua7Ogz7wtpVywSvHraRgdFOeY9mkXPzvm2IhoqNrteck2GErwqSqb19mPz6LnHueK0u7i6WuQWJn0CUoCtyMGIrowXSviK8qgHXKrmfTWATmCkbtosnLskNdYuOw8bKxq5S4WgdQVhPps2TiMSZndjX5NTr8= ubuntu@ip-10-2-0-6"}, }) }) workloadClusterDatacenter := dataCenter(func(d *anywherev1.CloudStackDatacenterConfig) { d.Spec.AvailabilityZones = append(d.Spec.AvailabilityZones, anywherev1.CloudStackAvailabilityZone{ Name: "test-zone", CredentialsRef: "global", }) d.Status.SpecValid = true }) cluster := cloudstackCluster(func(c *anywherev1.Cluster) { c.Name = "workload-cluster" c.Spec.ManagementCluster = anywherev1.ManagementCluster{ Name: managementCluster.Name, } c.Spec.BundlesRef = &anywherev1.BundlesRef{ Name: bundle.Name, Namespace: bundle.Namespace, APIVersion: bundle.APIVersion, } c.Spec.ControlPlaneConfiguration = anywherev1.ControlPlaneConfiguration{ Count: 1, Endpoint: &anywherev1.Endpoint{ Host: "1.1.1.1", }, MachineGroupRef: &anywherev1.Ref{ Kind: anywherev1.CloudStackMachineConfigKind, Name: machineConfigCP.Name, }, } c.Spec.DatacenterRef = anywherev1.Ref{ Kind: anywherev1.CloudStackDatacenterKind, Name: workloadClusterDatacenter.Name, } c.Spec.WorkerNodeGroupConfigurations = append(c.Spec.WorkerNodeGroupConfigurations, anywherev1.WorkerNodeGroupConfiguration{ Count: ptr.Int(1), MachineGroupRef: &anywherev1.Ref{ Kind: anywherev1.CloudStackMachineConfigKind, Name: machineConfigWN.Name, }, Name: "md-0", Labels: nil, }, ) }) secret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, ObjectMeta: metav1.ObjectMeta{ Name: "global", Namespace: constants.EksaSystemNamespace, }, Data: map[string][]byte{ decoder.APIKeyKey: []byte("test-key1"), decoder.APIUrlKey: []byte("http://1.1.1.1:8080/client/api"), decoder.SecretKeyKey: []byte("test-secret1"), }, } tt := &reconcilerTest{ t: t, WithT: NewWithT(t), APIExpecter: envtest.NewAPIExpecter(t, c), ctx: context.Background(), ipValidator: ipValidator, client: c, eksaSupportObjs: []client.Object{ test.Namespace(clusterNamespace), test.Namespace(constants.EksaSystemNamespace), managementCluster, workloadClusterDatacenter, bundle, test.EksdRelease(), }, cluster: cluster, datacenterConfig: workloadClusterDatacenter, machineConfigControlPlane: machineConfigCP, machineConfigWorker: machineConfigWN, cniReconciler: cniReconciler, remoteClientRegistry: remoteClientRegistry, validatorRegistry: validatorRegistry, execConfig: execConfig, secret: secret, } t.Cleanup(tt.cleanup) return tt } func (tt *reconcilerTest) cleanup() { tt.DeleteAndWait(tt.ctx, tt.allObjs()...) 
tt.DeleteAllOfAndWait(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{}) tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.Cluster{}) tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.MachineDeployment{}) tt.DeleteAllOfAndWait(tt.ctx, &cloudstackv1.CloudStackCluster{}) tt.DeleteAllOfAndWait(tt.ctx, &controlplanev1.KubeadmControlPlane{}) tt.DeleteAndWait(tt.ctx, &cloudstackv1.CloudStackMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "workload-cluster-etcd-1", Namespace: "eksa-system", }, }) tt.DeleteAndWait(tt.ctx, &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "workload-cluster-etcd", Namespace: "eksa-system", }, }) } type clusterOpt func(*anywherev1.Cluster) func cloudstackCluster(opts ...clusterOpt) *anywherev1.Cluster { c := &anywherev1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.ClusterKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, }, Spec: anywherev1.ClusterSpec{ KubernetesVersion: "1.22", ClusterNetwork: anywherev1.ClusterNetwork{ Pods: anywherev1.Pods{ CidrBlocks: []string{"0.0.0.0"}, }, Services: anywherev1.Services{ CidrBlocks: []string{"0.0.0.0"}, }, }, }, } for _, opt := range opts { opt(c) } return c } type datacenterOpt func(config *anywherev1.CloudStackDatacenterConfig) func dataCenter(opts ...datacenterOpt) *anywherev1.CloudStackDatacenterConfig { d := &anywherev1.CloudStackDatacenterConfig{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.CloudStackDatacenterKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "datacenter", Namespace: clusterNamespace, }, } for _, opt := range opts { opt(d) } return d } type cloudstackMachineOpt func(config *anywherev1.CloudStackMachineConfig) func machineConfig(opts ...cloudstackMachineOpt) *anywherev1.CloudStackMachineConfig { m := &anywherev1.CloudStackMachineConfig{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.CloudStackMachineConfigKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, }, Spec: anywherev1.CloudStackMachineConfigSpec{}, } for _, opt := range opts { opt(m) } return m }
786
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT.
// Source: pkg/providers/cloudstack/reconciler/reconciler.go

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	reflect "reflect"

	cluster "github.com/aws/eks-anywhere/pkg/cluster"
	controller "github.com/aws/eks-anywhere/pkg/controller"
	logr "github.com/go-logr/logr"
	gomock "github.com/golang/mock/gomock"
	client "sigs.k8s.io/controller-runtime/pkg/client"
)

// MockIPValidator is a mock of IPValidator interface.
type MockIPValidator struct {
	ctrl     *gomock.Controller
	recorder *MockIPValidatorMockRecorder
}

// MockIPValidatorMockRecorder is the mock recorder for MockIPValidator.
type MockIPValidatorMockRecorder struct {
	mock *MockIPValidator
}

// NewMockIPValidator creates a new mock instance.
func NewMockIPValidator(ctrl *gomock.Controller) *MockIPValidator {
	mock := &MockIPValidator{ctrl: ctrl}
	mock.recorder = &MockIPValidatorMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockIPValidator) EXPECT() *MockIPValidatorMockRecorder {
	return m.recorder
}

// ValidateControlPlaneIP mocks base method.
func (m *MockIPValidator) ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ValidateControlPlaneIP", ctx, log, spec)
	ret0, _ := ret[0].(controller.Result)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ValidateControlPlaneIP indicates an expected call of ValidateControlPlaneIP.
func (mr *MockIPValidatorMockRecorder) ValidateControlPlaneIP(ctx, log, spec interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIP", reflect.TypeOf((*MockIPValidator)(nil).ValidateControlPlaneIP), ctx, log, spec)
}

// MockCNIReconciler is a mock of CNIReconciler interface.
type MockCNIReconciler struct {
	ctrl     *gomock.Controller
	recorder *MockCNIReconcilerMockRecorder
}

// MockCNIReconcilerMockRecorder is the mock recorder for MockCNIReconciler.
type MockCNIReconcilerMockRecorder struct {
	mock *MockCNIReconciler
}

// NewMockCNIReconciler creates a new mock instance.
func NewMockCNIReconciler(ctrl *gomock.Controller) *MockCNIReconciler {
	mock := &MockCNIReconciler{ctrl: ctrl}
	mock.recorder = &MockCNIReconcilerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCNIReconciler) EXPECT() *MockCNIReconcilerMockRecorder {
	return m.recorder
}

// Reconcile mocks base method.
func (m *MockCNIReconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Reconcile", ctx, logger, client, spec)
	ret0, _ := ret[0].(controller.Result)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Reconcile indicates an expected call of Reconcile.
func (mr *MockCNIReconcilerMockRecorder) Reconcile(ctx, logger, client, spec interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCNIReconciler)(nil).Reconcile), ctx, logger, client, spec)
}

// MockRemoteClientRegistry is a mock of RemoteClientRegistry interface.
type MockRemoteClientRegistry struct {
	ctrl     *gomock.Controller
	recorder *MockRemoteClientRegistryMockRecorder
}

// MockRemoteClientRegistryMockRecorder is the mock recorder for MockRemoteClientRegistry.
type MockRemoteClientRegistryMockRecorder struct {
	mock *MockRemoteClientRegistry
}

// NewMockRemoteClientRegistry creates a new mock instance.
func NewMockRemoteClientRegistry(ctrl *gomock.Controller) *MockRemoteClientRegistry {
	mock := &MockRemoteClientRegistry{ctrl: ctrl}
	mock.recorder = &MockRemoteClientRegistryMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockRemoteClientRegistry) EXPECT() *MockRemoteClientRegistryMockRecorder {
	return m.recorder
}

// GetClient mocks base method.
func (m *MockRemoteClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetClient", ctx, cluster)
	ret0, _ := ret[0].(client.Client)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetClient indicates an expected call of GetClient.
func (mr *MockRemoteClientRegistryMockRecorder) GetClient(ctx, cluster interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockRemoteClientRegistry)(nil).GetClient), ctx, cluster)
}
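A sketch of stubbing the registry in a test, mirroring how the reconciler tests elsewhere in this document use it; the object key values come from those tests, while the fake client and test name are assumptions.

// Hypothetical test showing the mock registry returning a fake remote client.
func TestGetClientStubSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	registry := NewMockRemoteClientRegistry(ctrl)
	remote := fake.NewClientBuilder().Build() // sigs.k8s.io/controller-runtime/pkg/client/fake

	registry.EXPECT().
		GetClient(gomock.Any(), client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"}).
		Return(remote, nil)

	got, err := registry.GetClient(context.Background(), client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"})
	if err != nil || got == nil {
		t.Fatalf("expected a client, got %v (err: %v)", got, err)
	}
}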
131
eks-anywhere
aws
Go
package common

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
	"sigs.k8s.io/yaml"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/semver"
)

// GetAuditPolicy returns the audit policy either v1 or v1beta1 depending on kube version.
func GetAuditPolicy(kubeVersion v1alpha1.KubernetesVersion) (string, error) {
	// appending the ".0" as the patch version to have a valid semver string and use those semvers for comparison
	kubeVersionSemver, err := semver.New(string(kubeVersion) + ".0")
	if err != nil {
		return "", fmt.Errorf("error converting kubeVersion %v to semver %v", kubeVersion, err)
	}

	kube124Semver, err := semver.New(string(v1alpha1.Kube124) + ".0")
	if err != nil {
		return "", fmt.Errorf("error converting kubeVersion %v to semver %v", kube124Semver, err)
	}

	if kubeVersionSemver.Compare(kube124Semver) != -1 {
		auditPolicyv1, err := AuditPolicyV1Yaml()
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(auditPolicyv1)), nil
	}

	return auditPolicy, nil
}

// AuditPolicyV1Yaml returns the byte array for yaml created with v1 api version for audit policy.
func AuditPolicyV1Yaml() ([]byte, error) {
	auditPolicy := AuditPolicyV1()
	return yaml.Marshal(auditPolicy)
}

// AuditPolicyV1 returns the v1 audit policy.
func AuditPolicyV1() *auditv1.Policy {
	return &auditv1.Policy{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Policy",
			APIVersion: "audit.k8s.io/v1",
		},
		Rules: []auditv1.PolicyRule{
			{
				Level: auditv1.Level("RequestResponse"),
				Verbs: []string{"update", "patch", "delete"},
				Resources: []auditv1.GroupResources{
					{
						Resources:     []string{"configmaps"},
						ResourceNames: []string{"aws-auth"},
					},
				},
				OmitStages: []auditv1.Stage{"RequestReceived"},
				Namespaces: []string{"kube-system"},
			},
			{
				Level: auditv1.Level("None"),
				Users: []string{"system:kube-proxy"},
				Verbs: []string{"watch"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"endpoints", "services", "services/status"},
					},
				},
			},
			{
				Level: auditv1.Level("None"),
				Users: []string{"kubelet"},
				Verbs: []string{"get"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"nodes", "nodes/status"},
					},
				},
			},
			{
				Level: auditv1.Level("None"),
				Verbs: []string{"get"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"nodes", "nodes/status"},
					},
				},
			},
			{
				Level: auditv1.Level("None"),
				Users: []string{
					"system:kube-controller-manager",
					"system:kube-scheduler",
					"system:serviceaccount:kube-system:endpoint-controller",
				},
				Verbs: []string{"get", "update"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"endpoints"},
					},
				},
				Namespaces: []string{"kube-system"},
			},
			{
				Level: auditv1.Level("None"),
				Users: []string{"system:apiserver"},
				Verbs: []string{"get"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"namespaces", "namespaces/status", "namespaces/finalize"},
					},
				},
			},
			{
				Level: auditv1.Level("None"),
				Users: []string{"system:kube-controller-manager"},
				Verbs: []string{"get", "list"},
				Resources: []auditv1.GroupResources{
					{
						Group: "metrics.k8s.io",
					},
				},
			},
			{
				Level:           auditv1.Level("None"),
				NonResourceURLs: []string{"/healthz*", "/version", "/swagger*"},
			},
			{
				Level: auditv1.Level("None"),
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"events"},
					},
				},
			},
			{
				Level: auditv1.Level("Request"),
				Users: []string{
					"kubelet",
					"system:node-problem-detector",
					"system:serviceaccount:kube-system:node-problem-detector",
				},
				Verbs: []string{"update", "patch"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"nodes/status", "pods/status"},
					},
				},
				OmitStages: []auditv1.Stage{"RequestReceived"},
			},
			{
				Level: auditv1.Level("Request"),
				Verbs: []string{"update", "patch"},
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"nodes/status", "pods/status"},
					},
				},
				OmitStages: []auditv1.Stage{"RequestReceived"},
				UserGroups: []string{"system:nodes"},
			},
			{
				Level:      auditv1.Level("Request"),
				Users:      []string{"system:serviceaccount:kube-system:namespace-controller"},
				Verbs:      []string{"deletecollection"},
				OmitStages: []auditv1.Stage{"RequestReceived"},
			},
			{
				Level: auditv1.Level("Metadata"),
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"secrets", "configmaps"},
					},
					{
						Group:     "authentication.k8s.io",
						Resources: []string{"tokenreviews"},
					},
				},
				OmitStages: []auditv1.Stage{"RequestReceived"},
			},
			{
				Level: auditv1.Level("Request"),
				Resources: []auditv1.GroupResources{
					{
						Resources: []string{"serviceaccounts/token"},
					},
				},
			},
			{
				Level: auditv1.Level("Request"),
				Verbs: []string{"get", "list", "watch"},
				Resources: []auditv1.GroupResources{
					{Group: ""},
					{Group: "admissionregistration.k8s.io"},
					{Group: "apiextensions.k8s.io"},
					{Group: "apiregistration.k8s.io"},
					{Group: "apps"},
					{Group: "authentication.k8s.io"},
					{Group: "authorization.k8s.io"},
					{Group: "autoscaling"},
					{Group: "batch"},
					{Group: "certificates.k8s.io"},
					{Group: "extensions"},
					{Group: "metrics.k8s.io"},
					{Group: "networking.k8s.io"},
					{Group: "policy"},
					{Group: "rbac.authorization.k8s.io"},
					{Group: "scheduling.k8s.io"},
					{Group: "settings.k8s.io"},
					{Group: "storage.k8s.io"},
				},
				OmitStages: []auditv1.Stage{"RequestReceived"},
			},
			{
				Level: auditv1.Level("RequestResponse"),
				Resources: []auditv1.GroupResources{
					{Group: ""},
					{Group: "admissionregistration.k8s.io"},
					{Group: "apiextensions.k8s.io"},
					{Group: "apiregistration.k8s.io"},
					{Group: "apps"},
					{Group: "authentication.k8s.io"},
					{Group: "authorization.k8s.io"},
					{Group: "autoscaling"},
					{Group: "batch"},
					{Group: "certificates.k8s.io"},
					{Group: "extensions"},
					{Group: "metrics.k8s.io"},
					{Group: "networking.k8s.io"},
					{Group: "policy"},
					{Group: "rbac.authorization.k8s.io"},
					{Group: "scheduling.k8s.io"},
					{Group: "settings.k8s.io"},
					{Group: "storage.k8s.io"},
				},
				OmitStages: []auditv1.Stage{"RequestReceived"},
			},
			{
				Level:      auditv1.Level("Metadata"),
				OmitStages: []auditv1.Stage{"RequestReceived"},
			},
		},
	}
}
316
eks-anywhere
aws
Go
package common

import (
	_ "embed"
	"fmt"
	"strings"
	"time"

	"golang.org/x/crypto/ssh"
	"sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	"sigs.k8s.io/yaml"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/bootstrapper"
	"github.com/aws/eks-anywhere/pkg/crypto"
	"github.com/aws/eks-anywhere/pkg/filewriter"
	"github.com/aws/eks-anywhere/pkg/logger"
	"github.com/aws/eks-anywhere/pkg/types"
)

//go:embed config/audit-policy.yaml
var auditPolicy string

// TODO: Split out common into separate packages to avoid becoming a dumping ground

const (
	privateKeyFileName = "eks-a-id_rsa"
	publicKeyFileName  = "eks-a-id_rsa.pub"
)

func BootstrapClusterOpts(clusterConfig *v1alpha1.Cluster, serverEndpoints ...string) ([]bootstrapper.BootstrapClusterOption, error) {
	env := map[string]string{}
	if clusterConfig.Spec.ProxyConfiguration != nil {
		noProxies := append([]string{}, serverEndpoints...)
		noProxies = append(noProxies, clusterConfig.Spec.ControlPlaneConfiguration.Endpoint.Host)
		for _, s := range clusterConfig.Spec.ProxyConfiguration.NoProxy {
			if s != "" {
				noProxies = append(noProxies, s)
			}
		}
		env["HTTP_PROXY"] = clusterConfig.Spec.ProxyConfiguration.HttpProxy
		env["HTTPS_PROXY"] = clusterConfig.Spec.ProxyConfiguration.HttpsProxy
		env["NO_PROXY"] = strings.Join(noProxies, ",")
	}
	return []bootstrapper.BootstrapClusterOption{bootstrapper.WithEnv(env)}, nil
}

func StripSshAuthorizedKeyComment(key string) (string, error) {
	public, _, _, _, err := ssh.ParseAuthorizedKey([]byte(key))
	if err != nil {
		return "", err
	}
	// ssh.MarshalAuthorizedKey returns the key with a trailing newline, which we want to remove
	return strings.TrimSpace(string(ssh.MarshalAuthorizedKey(public))), nil
}

func GenerateSSHAuthKey(writer filewriter.FileWriter) (string, error) {
	privateKeyPath, sshAuthorizedKeyBytes, err := crypto.NewSshKeyPairUsingFileWriter(writer, privateKeyFileName, publicKeyFileName)
	if err != nil {
		return "", fmt.Errorf("generating ssh key pair: %v", err)
	}

	logger.Info(fmt.Sprintf(
		"Private key saved to %[1]s. Use 'ssh -i %[1]s <username>@<Node-IP-Address>' to login to your cluster node",
		privateKeyPath,
	))

	key := string(sshAuthorizedKeyBytes)
	key = strings.TrimRight(key, "\n")

	return key, nil
}

func CPMachineTemplateBase(clusterName string) string {
	return fmt.Sprintf("%s-control-plane-template", clusterName)
}

func EtcdMachineTemplateBase(clusterName string) string {
	return fmt.Sprintf("%s-etcd-template", clusterName)
}

func WorkerMachineTemplateBase(clusterName, workerNodeGroupName string) string {
	return fmt.Sprintf("%s-%s", clusterName, workerNodeGroupName)
}

func CPMachineTemplateName(clusterName string, now types.NowFunc) string {
	t := now().UnixNano() / int64(time.Millisecond)
	return fmt.Sprintf("%s-%d", CPMachineTemplateBase(clusterName), t)
}

func EtcdMachineTemplateName(clusterName string, now types.NowFunc) string {
	t := now().UnixNano() / int64(time.Millisecond)
	return fmt.Sprintf("%s-%d", EtcdMachineTemplateBase(clusterName), t)
}

func WorkerMachineTemplateName(clusterName, workerNodeGroupName string, now types.NowFunc) string {
	t := now().UnixNano() / int64(time.Millisecond)
	return fmt.Sprintf("%s-%d", WorkerMachineTemplateBase(clusterName, workerNodeGroupName), t)
}

func KubeadmConfigTemplateName(clusterName, workerNodeGroupName string, now types.NowFunc) string {
	t := now().UnixNano() / int64(time.Millisecond)
	return fmt.Sprintf("%s-%s-template-%d", clusterName, workerNodeGroupName, t)
}

// GetCAPIBottlerocketSettingsConfig returns the formatted CAPI Bottlerocket settings config as a YAML marshaled string.
func GetCAPIBottlerocketSettingsConfig(config *v1alpha1.BottlerocketConfiguration) (string, error) {
	if config == nil {
		return "", nil
	}

	b := &v1beta1.BottlerocketSettings{}
	if config.Kubernetes != nil {
		b.Kubernetes = &v1beta1.BottlerocketKubernetesSettings{
			MaxPods: config.Kubernetes.MaxPods,
		}
		if len(config.Kubernetes.AllowedUnsafeSysctls) > 0 {
			b.Kubernetes.AllowedUnsafeSysctls = config.Kubernetes.AllowedUnsafeSysctls
		}
		if len(config.Kubernetes.ClusterDNSIPs) > 0 {
			b.Kubernetes.ClusterDNSIPs = config.Kubernetes.ClusterDNSIPs
		}
	}

	if config.Kernel != nil {
		if config.Kernel.SysctlSettings != nil {
			b.Kernel = &v1beta1.BottlerocketKernelSettings{
				SysctlSettings: config.Kernel.SysctlSettings,
			}
		}
	}

	if config.Boot != nil {
		if config.Boot.BootKernelParameters != nil {
			b.Boot = &v1beta1.BottlerocketBootSettings{
				BootKernelParameters: config.Boot.BootKernelParameters,
			}
		}
	}

	brMap := map[string]*v1beta1.BottlerocketSettings{
		"bottlerocket": b,
	}

	marshaledConfig, err := yaml.Marshal(brMap)
	if err != nil {
		return "", fmt.Errorf("failed to marshal bottlerocket config: %v", err)
	}

	return strings.Trim(string(marshaledConfig), "\n"), nil
}
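A hedged sketch of the timestamped naming helpers above; the printed suffixes are illustrative, since the helpers append the current time in milliseconds to the base names. If dropped into this package, every identifier below already exists.

// Illustrative output of the machine template naming helpers.
func templateNamingSketch() {
	now := time.Now // satisfies types.NowFunc
	fmt.Println(CPMachineTemplateName("prod", now))             // e.g. prod-control-plane-template-1680000000000
	fmt.Println(EtcdMachineTemplateName("prod", now))           // e.g. prod-etcd-template-1680000000000
	fmt.Println(WorkerMachineTemplateName("prod", "md-0", now)) // e.g. prod-md-0-1680000000000
	fmt.Println(KubeadmConfigTemplateName("prod", "md-0", now)) // e.g. prod-md-0-template-1680000000000
}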
150
eks-anywhere
aws
Go
package common_test import ( "fmt" "testing" . "github.com/onsi/gomega" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/providers/common" ) const ( emptyBottlerocketConfig = `bottlerocket: {}` emptyKubernetesConfig = `bottlerocket: kubernetes: {}` maxPodsConfig = `bottlerocket: kubernetes: maxPods: 100` allowedUnsafeSysctlsConfig = `bottlerocket: kubernetes: allowedUnsafeSysctls: - foo - bar` clusterDNSIPsConfig = `bottlerocket: kubernetes: clusterDNSIPs: - 1.2.3.4 - 5.6.7.8` kernelSysctlConfig = `bottlerocket: kernel: sysctlSettings: foo: bar` bootKernelConfig = `bottlerocket: boot: bootKernelParameters: foo: - abc - def` ) func TestGetCAPIBottlerocketSettingsConfig(t *testing.T) { g := NewWithT(t) tests := []struct { name string config *v1alpha1.BottlerocketConfiguration expected string }{ { name: "nil config", config: nil, expected: "", }, { name: "empty config", config: &v1alpha1.BottlerocketConfiguration{}, expected: emptyBottlerocketConfig, }, { name: "empty kubernetes config", config: &v1alpha1.BottlerocketConfiguration{ Kubernetes: &v1beta1.BottlerocketKubernetesSettings{}, }, expected: emptyKubernetesConfig, }, { name: "with allowed unsafe sysctls", config: &v1alpha1.BottlerocketConfiguration{ Kubernetes: &v1beta1.BottlerocketKubernetesSettings{ AllowedUnsafeSysctls: []string{"foo", "bar"}, }, }, expected: allowedUnsafeSysctlsConfig, }, { name: "with cluster dns IPs", config: &v1alpha1.BottlerocketConfiguration{ Kubernetes: &v1beta1.BottlerocketKubernetesSettings{ ClusterDNSIPs: []string{"1.2.3.4", "5.6.7.8"}, }, }, expected: clusterDNSIPsConfig, }, { name: "with max pods", config: &v1alpha1.BottlerocketConfiguration{ Kubernetes: &v1beta1.BottlerocketKubernetesSettings{ MaxPods: 100, }, }, expected: maxPodsConfig, }, { name: "with kernel sysctl config", config: &v1alpha1.BottlerocketConfiguration{ Kernel: &v1beta1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", }, }, }, expected: kernelSysctlConfig, }, { name: "with boot kernel parameters", config: &v1alpha1.BottlerocketConfiguration{ Boot: &v1beta1.BottlerocketBootSettings{ BootKernelParameters: map[string][]string{ "foo": { "abc", "def", }, }, }, }, expected: bootKernelConfig, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := common.GetCAPIBottlerocketSettingsConfig(tt.config) g.Expect(err).ToNot(HaveOccurred()) if got != tt.expected { fmt.Println(got) fmt.Println(tt.expected) } g.Expect(got).To(Equal(tt.expected)) }) } }
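// Illustrative sketch (not part of the repository): shows the shape of a call to
// StripSshAuthorizedKeyComment from the common package above. The key below is a
// hypothetical, truncated placeholder used only to demonstrate the call; a real
// run needs a complete, valid authorized key.
package common_test

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/providers/common"
)

func stripComment() {
	// Hypothetical authorized key with a trailing "user@host" comment.
	key := "ssh-rsa AAAAB3NzaC1yc2E... user@host"
	stripped, err := common.StripSshAuthorizedKeyComment(key)
	if err != nil {
		// ssh.ParseAuthorizedKey rejects malformed keys, including this placeholder.
		fmt.Println("invalid key:", err)
		return
	}
	// On success the comment is gone and the trailing newline is trimmed.
	fmt.Println(stripped)
}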
139
eks-anywhere
aws
Go
package common

import (
	"github.com/aws/eks-anywhere/pkg/filewriter"
)

// SshAuthKeyGenerator satisfies SSHAuthKeyGenerator. It exists to wrap the common key
// generation function so we can isolate the RNG in testing.
type SshAuthKeyGenerator struct{}

// GenerateSSHAuthKey delegates to the package-level GenerateSSHAuthKey.
func (SshAuthKeyGenerator) GenerateSSHAuthKey(w filewriter.FileWriter) (string, error) {
	return GenerateSSHAuthKey(w)
}
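// Illustrative sketch (not part of the repository): shows why the thin wrapper above
// is useful. Code that depends on a small interface rather than the package-level
// function can swap in a deterministic fake in tests. The SSHAuthKeyGenerator
// interface and the fake below are assumptions, modeled only on the wrapper's
// method set.
package common_test

import (
	"github.com/aws/eks-anywhere/pkg/filewriter"
)

// SSHAuthKeyGenerator is a consumer-side interface the wrapper satisfies.
type SSHAuthKeyGenerator interface {
	GenerateSSHAuthKey(filewriter.FileWriter) (string, error)
}

// fakeKeyGenerator returns a canned key, isolating tests from the RNG and the
// filesystem.
type fakeKeyGenerator struct{ key string }

func (f fakeKeyGenerator) GenerateSSHAuthKey(filewriter.FileWriter) (string, error) {
	return f.key, nil
}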
14
eks-anywhere
aws
Go
package docker import ( "context" "time" "github.com/go-logr/logr" "github.com/pkg/errors" dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" yamlcapi "github.com/aws/eks-anywhere/pkg/clusterapi/yaml" "github.com/aws/eks-anywhere/pkg/yamlutil" ) // ControlPlane represents a CAPI Docker control plane. type ControlPlane = clusterapi.ControlPlane[*dockerv1.DockerCluster, *dockerv1.DockerMachineTemplate] type controlPlaneBuilder = yamlcapi.ControlPlaneBuilder[*dockerv1.DockerCluster, *dockerv1.DockerMachineTemplate] // ControlPlaneSpec builds a docker ControlPlane definition based on an eks-a cluster spec. func ControlPlaneSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*ControlPlane, error) { templateBuilder := NewDockerTemplateBuilder(time.Now) controlPlaneYaml, err := templateBuilder.GenerateCAPISpecControlPlane( spec, func(values map[string]interface{}) { values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(spec.Cluster) }, ) if err != nil { return nil, errors.Wrap(err, "generating docker control plane yaml spec") } parser, builder, err := newControlPlaneParser(logger) if err != nil { return nil, err } err = parser.Parse(controlPlaneYaml, builder) if err != nil { return nil, errors.Wrap(err, "parsing docker control plane yaml") } cp := builder.ControlPlane if err = cp.UpdateImmutableObjectNames(ctx, client, GetMachineTemplate, MachineTemplateEqual); err != nil { return nil, errors.Wrap(err, "updating docker immutable object names") } return cp, nil } func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *controlPlaneBuilder, error) { parser, builder, err := yamlcapi.NewControlPlaneParserAndBuilder( logger, yamlutil.NewMapping( "DockerCluster", func() *dockerv1.DockerCluster { return &dockerv1.DockerCluster{} }, ), yamlutil.NewMapping( "DockerMachineTemplate", func() *dockerv1.DockerMachineTemplate { return &dockerv1.DockerMachineTemplate{} }, ), ) if err != nil { return nil, nil, errors.Wrap(err, "building docker control plane parser") } return parser, builder, nil }
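// Illustrative sketch (not part of the repository): outlines a plausible call site
// for ControlPlaneSpec above. The surrounding function is hypothetical; only the
// ControlPlaneSpec signature is taken from this document, and Objects() returning
// []kubernetes.Object is an assumption based on the tests that follow.
package docker_test

import (
	"context"

	"github.com/go-logr/logr"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/providers/docker"
)

// controlPlaneObjects builds the docker control plane for a spec and returns the
// CAPI objects that would be applied to the management cluster.
func controlPlaneObjects(ctx context.Context, log logr.Logger, c kubernetes.Client, spec *cluster.Spec) ([]kubernetes.Object, error) {
	cp, err := docker.ControlPlaneSpec(ctx, log, c, spec)
	if err != nil {
		return nil, err
	}
	// Machine template names have already been reconciled against any existing
	// immutable objects at this point.
	return cp.Objects(), nil
}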
78
eks-anywhere
aws
Go
package docker_test import ( "context" "testing" "time" etcdadmbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "github.com/aws/eks-anywhere/internal/test" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/providers/docker" "github.com/aws/eks-anywhere/pkg/registrymirror" "github.com/aws/eks-anywhere/pkg/registrymirror/containerd" "github.com/aws/eks-anywhere/pkg/utils/ptr" releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) func TestControlPlaneObjects(t *testing.T) { tests := []struct { name string controlPlane *docker.ControlPlane want []kubernetes.Object }{ { name: "stacked etcd", controlPlane: &docker.ControlPlane{ Cluster: capiCluster(), ProviderCluster: dockerCluster(), KubeadmControlPlane: kubeadmControlPlane(), ControlPlaneMachineTemplate: dockerMachineTemplate("cp-mt"), }, want: []kubernetes.Object{ capiCluster(), dockerCluster(), kubeadmControlPlane(), dockerMachineTemplate("cp-mt"), }, }, { name: "unstacked etcd", controlPlane: &docker.ControlPlane{ Cluster: capiCluster(), ProviderCluster: dockerCluster(), KubeadmControlPlane: kubeadmControlPlane(), ControlPlaneMachineTemplate: dockerMachineTemplate("cp-mt"), EtcdCluster: etcdCluster(), EtcdMachineTemplate: dockerMachineTemplate("etcd-mt"), }, want: []kubernetes.Object{ capiCluster(), dockerCluster(), kubeadmControlPlane(), dockerMachineTemplate("cp-mt"), etcdCluster(), dockerMachineTemplate("etcd-mt"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) g.Expect(tt.controlPlane.Objects()).To(ConsistOf(tt.want)) }) } } func TestControlPlaneSpecNewCluster(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() client := test.NewFakeKubeClient() spec := testClusterSpec() wantCPMachineTemplate := dockerMachineTemplate("test-control-plane-1") wantEtcdMachineTemplate := dockerMachineTemplate("test-etcd-1") cp, err := docker.ControlPlaneSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cp).NotTo(BeNil()) g.Expect(cp.Cluster).To(Equal(capiCluster())) g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane())) g.Expect(cp.EtcdCluster).To(Equal(etcdCluster())) g.Expect(cp.ProviderCluster).To(Equal(dockerCluster())) g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(wantCPMachineTemplate)) g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdMachineTemplate)) } func TestControlPlaneSpecNoKubeVersion(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() client := test.NewFakeKubeClient() spec := testClusterSpec() spec.Cluster.Spec.KubernetesVersion = "" _, err := docker.ControlPlaneSpec(ctx, logger, client, spec) g.Expect(err).To(MatchError(ContainSubstring("generating docker control plane yaml spec"))) } func TestControlPlaneSpecUpdateMachineTemplates(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() 
originalKubeadmControlPlane := kubeadmControlPlane() originalEtcdCluster := etcdCluster() originalEtcdCluster.Spec.InfrastructureTemplate.Name = "test-etcd-2" originalCPMachineTemplate := dockerMachineTemplate("test-control-plane-1") originalEtcdMachineTemplate := dockerMachineTemplate("test-etcd-2") wantKCP := originalKubeadmControlPlane.DeepCopy() wantEtcd := originalEtcdCluster.DeepCopy() wantCPtemplate := originalCPMachineTemplate.DeepCopy() wantEtcdTemplate := originalEtcdMachineTemplate.DeepCopy() originalCPMachineTemplate.Spec.Template.Spec.CustomImage = "old-custom-image" originalEtcdMachineTemplate.Spec.Template.Spec.CustomImage = "old-custom-image-etcd" client := test.NewFakeKubeClient( originalKubeadmControlPlane, originalEtcdCluster, originalCPMachineTemplate, originalEtcdMachineTemplate, ) cpTaints := []corev1.Taint{ { Key: "foo", Value: "bar", Effect: "PreferNoSchedule", }, } spec.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints wantKCP.Spec.MachineTemplate.InfrastructureRef.Name = "test-control-plane-2" wantKCP.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.Taints = cpTaints wantKCP.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.Taints = cpTaints wantEtcd.Spec.InfrastructureTemplate.Name = "test-etcd-3" wantCPtemplate.Name = "test-control-plane-2" wantEtcdTemplate.Name = "test-etcd-3" cp, err := docker.ControlPlaneSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cp).NotTo(BeNil()) g.Expect(cp.Cluster).To(Equal(capiCluster())) g.Expect(cp.KubeadmControlPlane).To(Equal(wantKCP)) g.Expect(cp.EtcdCluster).To(Equal(wantEtcd)) g.Expect(cp.ProviderCluster).To(Equal(dockerCluster())) g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(wantCPtemplate)) g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdTemplate)) } func TestControlPlaneSpecNoChangesMachineTemplates(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() originalKubeadmControlPlane := kubeadmControlPlane() originalEtcdCluster := etcdCluster() originalEtcdCluster.Spec.InfrastructureTemplate.Name = "test-etcd-1" originalCPMachineTemplate := dockerMachineTemplate("test-control-plane-1") originalEtcdMachineTemplate := dockerMachineTemplate("test-etcd-1") wantKCP := originalKubeadmControlPlane.DeepCopy() wantEtcd := originalEtcdCluster.DeepCopy() wantCPtemplate := originalCPMachineTemplate.DeepCopy() wantEtcdTemplate := originalEtcdMachineTemplate.DeepCopy() // This mimics what would happen if the objects were returned by a real api server // It helps make sure that the immutable object comparison is able to deal with these // kind of changes. originalCPMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now()) originalEtcdMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now()) // This is testing defaults. It's possible that some default logic will set items that are not set in our machine templates. // We need to take this into consideration when checking for equality. 
originalCPMachineTemplate.Spec.Template.Spec.ProviderID = ptr.String("default-id")
	originalEtcdMachineTemplate.Spec.Template.Spec.ProviderID = ptr.String("default-id")

	client := test.NewFakeKubeClient(
		originalKubeadmControlPlane,
		originalEtcdCluster,
		originalCPMachineTemplate,
		originalEtcdMachineTemplate,
	)

	cp, err := docker.ControlPlaneSpec(ctx, logger, client, spec)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(cp).NotTo(BeNil())
	g.Expect(cp.Cluster).To(Equal(capiCluster()))
	g.Expect(cp.KubeadmControlPlane).To(Equal(wantKCP))
	g.Expect(cp.EtcdCluster).To(Equal(wantEtcd))
	g.Expect(cp.ProviderCluster).To(Equal(dockerCluster()))
	g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(wantCPtemplate))
	g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdTemplate))
}

func TestControlPlaneSpecErrorFromClient(t *testing.T) {
	g := NewWithT(t)
	logger := test.NewNullLogger()
	ctx := context.Background()
	spec := testClusterSpec()
	client := test.NewFakeKubeClientAlwaysError()
	_, err := docker.ControlPlaneSpec(ctx, logger, client, spec)
	g.Expect(err).To(MatchError(ContainSubstring("updating docker immutable object names")))
}

func TestControlPlaneSpecRegistryMirrorConfiguration(t *testing.T) {
	logger := test.NewNullLogger()
	ctx := context.Background()
	client := test.NewFakeKubeClient()
	tests := []struct {
		name         string
		mirrorConfig *anywherev1.RegistryMirrorConfiguration
		files        []bootstrapv1.File
	}{
		{
			name:         "insecure skip verify",
			mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(),
			files:        test.RegistryMirrorConfigFilesInsecureSkipVerify(),
		},
		{
			name:         "insecure skip verify with ca cert",
			mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(),
			files:        test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			spec := testClusterSpec(func(s *cluster.Spec) {
				s.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig
			})

			wantCPMachineTemplate := dockerMachineTemplate("test-control-plane-1")
			wantEtcdMachineTemplate := dockerMachineTemplate("test-etcd-1")

			cp, err := docker.ControlPlaneSpec(ctx, logger, client, spec)
			g := NewWithT(t)
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(cp).NotTo(BeNil())
			g.Expect(cp.Cluster).To(Equal(capiCluster()))
			g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
				kcp.Spec.KubeadmConfigSpec.Files = append(kcp.Spec.KubeadmConfigSpec.Files, tt.files...)
				kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands, test.RegistryMirrorPreKubeadmCommands()...)
}))) g.Expect(cp.EtcdCluster).To(Equal(etcdCluster(func(ec *etcdv1.EtcdadmCluster) { ec.Spec.EtcdadmConfigSpec.RegistryMirror = &etcdadmbootstrapv1.RegistryMirrorConfiguration{ Endpoint: containerd.ToAPIEndpoint(registrymirror.FromClusterRegistryMirrorConfiguration(tt.mirrorConfig).CoreEKSAMirror()), CACert: tt.mirrorConfig.CACertContent, } }))) g.Expect(cp.ProviderCluster).To(Equal(dockerCluster())) g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(wantCPMachineTemplate)) g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdMachineTemplate)) }) } } func testClusterSpec(opts ...test.ClusterSpecOpt) *cluster.Spec { name := "test" namespace := "test-namespace" clusterOpts := make([]test.ClusterSpecOpt, 0) clusterOpts = append(clusterOpts, func(s *cluster.Spec) { s.Cluster = &anywherev1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, Spec: anywherev1.ClusterSpec{ ClusterNetwork: anywherev1.ClusterNetwork{ Services: anywherev1.Services{ CidrBlocks: []string{"10.96.0.0/12"}, }, Pods: anywherev1.Pods{ CidrBlocks: []string{"192.168.0.0/16"}, }, }, ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{ Count: 3, }, KubernetesVersion: "1.23", WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{ { Count: ptr.Int(3), MachineGroupRef: &anywherev1.Ref{Name: name}, Name: "md-0", }, { Count: ptr.Int(3), MachineGroupRef: &anywherev1.Ref{Name: name}, Name: "md-1", }, }, ExternalEtcdConfiguration: &anywherev1.ExternalEtcdConfiguration{ Count: 3, }, DatacenterRef: anywherev1.Ref{ Kind: "DockerDatacenterConfig", Name: name, }, }, } s.VersionsBundle = &cluster.VersionsBundle{ KubeDistro: &cluster.KubeDistro{ Kubernetes: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/kubernetes", Tag: "v1.23.12-eks-1-23-6", }, CoreDNS: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/coredns", Tag: "v1.8.7-eks-1-23-6", }, Etcd: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/etcd-io", Tag: "v3.5.4-eks-1-23-6", }, EtcdVersion: "3.5.4", }, VersionsBundle: &releasev1alpha1.VersionsBundle{ EksD: releasev1alpha1.EksDRelease{ KindNode: releasev1alpha1.Image{ Description: "kind/node container image", Name: "kind/node", URI: "public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/node:v1.23.12-eks-d-1-23-6-eks-a-19", }, }, }, } }) clusterOpts = append(clusterOpts, opts...) return test.NewClusterSpec(clusterOpts...) 
} func capiCluster() *clusterv1.Cluster { return &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: "Cluster", APIVersion: "cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: constants.EksaSystemNamespace, }, Spec: clusterv1.ClusterSpec{ ClusterNetwork: &clusterv1.ClusterNetwork{ APIServerPort: nil, ServiceDomain: "cluster.local", Services: &clusterv1.NetworkRanges{ CIDRBlocks: []string{"10.96.0.0/12"}, }, Pods: &clusterv1.NetworkRanges{ CIDRBlocks: []string{"192.168.0.0/16"}, }, }, ControlPlaneRef: &corev1.ObjectReference{ Kind: "KubeadmControlPlane", Name: "test", Namespace: constants.EksaSystemNamespace, APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", }, ManagedExternalEtcdRef: &corev1.ObjectReference{ Kind: "EtcdadmCluster", Name: "test-etcd", Namespace: constants.EksaSystemNamespace, APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1", }, InfrastructureRef: &corev1.ObjectReference{ Kind: "DockerCluster", Name: "test", Namespace: constants.EksaSystemNamespace, APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", }, }, } } func dockerCluster() *dockerv1.DockerCluster { return &dockerv1.DockerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "DockerCluster", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: constants.EksaSystemNamespace, }, } } func dockerMachineTemplate(name string) *dockerv1.DockerMachineTemplate { return &dockerv1.DockerMachineTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerMachineTemplate", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: constants.EksaSystemNamespace, }, Spec: dockerv1.DockerMachineTemplateSpec{ Template: dockerv1.DockerMachineTemplateResource{ Spec: dockerv1.DockerMachineSpec{ CustomImage: "public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/node:v1.23.12-eks-d-1-23-6-eks-a-19", ExtraMounts: []dockerv1.Mount{ { ContainerPath: "/var/run/docker.sock", HostPath: "/var/run/docker.sock", Readonly: false, }, }, Bootstrapped: false, }, }, }, } } func kubeadmControlPlane(opts ...func(*controlplanev1.KubeadmControlPlane)) *controlplanev1.KubeadmControlPlane { kcp := &controlplanev1.KubeadmControlPlane{ TypeMeta: metav1.TypeMeta{ Kind: "KubeadmControlPlane", APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: constants.EksaSystemNamespace, }, Spec: controlplanev1.KubeadmControlPlaneSpec{ MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "DockerMachineTemplate", Name: "test-control-plane-1", Namespace: constants.EksaSystemNamespace, }, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ ImageRepository: "public.ecr.aws/eks-distro/kubernetes", Etcd: bootstrapv1.Etcd{ External: &bootstrapv1.ExternalEtcd{ Endpoints: []string{}, CAFile: "/etc/kubernetes/pki/etcd/ca.crt", CertFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt", KeyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key", }, }, DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-distro/coredns", ImageTag: "v1.8.7-eks-1-23-6", }, }, APIServer: bootstrapv1.APIServer{ CertSANs: []string{"localhost", "127.0.0.1"}, ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{ "audit-policy-file": "/etc/kubernetes/audit-policy.yaml", 
"audit-log-path": "/var/log/kubernetes/api-audit.log", "audit-log-maxage": "30", "audit-log-maxbackup": "10", "audit-log-maxsize": "512", "profiling": "false", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, ExtraVolumes: []bootstrapv1.HostPathMount{ { HostPath: "/etc/kubernetes/audit-policy.yaml", MountPath: "/etc/kubernetes/audit-policy.yaml", Name: "audit-policy", PathType: "File", ReadOnly: true, }, { HostPath: "/var/log/kubernetes", MountPath: "/var/log/kubernetes", Name: "audit-log-dir", PathType: "DirectoryOrCreate", ReadOnly: false, }, }, }, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{ "enable-hostpath-provisioner": "true", "profiling": "false", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, Scheduler: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{ "profiling": "false", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, }, Files: []bootstrapv1.File{ { Path: "/etc/kubernetes/audit-policy.yaml", Owner: "root:root", Content: `apiVersion: audit.k8s.io/v1beta1 kind: Policy rules: # Log aws-auth configmap changes - level: RequestResponse namespaces: ["kube-system"] verbs: ["update", "patch", "delete"] resources: - group: "" # core resources: ["configmaps"] resourceNames: ["aws-auth"] omitStages: - "RequestReceived" # The following requests were manually identified as high-volume and low-risk, # so drop them. - level: None users: ["system:kube-proxy"] verbs: ["watch"] resources: - group: "" # core resources: ["endpoints", "services", "services/status"] - level: None users: ["kubelet"] # legacy kubelet identity verbs: ["get"] resources: - group: "" # core resources: ["nodes", "nodes/status"] - level: None userGroups: ["system:nodes"] verbs: ["get"] resources: - group: "" # core resources: ["nodes", "nodes/status"] - level: None users: - system:kube-controller-manager - system:kube-scheduler - system:serviceaccount:kube-system:endpoint-controller verbs: ["get", "update"] namespaces: ["kube-system"] resources: - group: "" # core resources: ["endpoints"] - level: None users: ["system:apiserver"] verbs: ["get"] resources: - group: "" # core resources: ["namespaces", "namespaces/status", "namespaces/finalize"] # Don't log HPA fetching metrics. - level: None users: - system:kube-controller-manager verbs: ["get", "list"] resources: - group: "metrics.k8s.io" # Don't log these read-only URLs. - level: None nonResourceURLs: - /healthz* - /version - /swagger* # Don't log events requests. - level: None resources: - group: "" # core resources: ["events"] # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes - level: Request users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] verbs: ["update","patch"] resources: - group: "" # core resources: ["nodes/status", "pods/status"] omitStages: - "RequestReceived" - level: Request userGroups: ["system:nodes"] verbs: ["update","patch"] resources: - group: "" # core resources: ["nodes/status", "pods/status"] omitStages: - "RequestReceived" # deletecollection calls can be large, don't log responses for expected namespace deletions - level: Request users: ["system:serviceaccount:kube-system:namespace-controller"] verbs: ["deletecollection"] omitStages: - "RequestReceived" # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, # so only log at the Metadata level. 
- level: Metadata resources: - group: "" # core resources: ["secrets", "configmaps"] - group: authentication.k8s.io resources: ["tokenreviews"] omitStages: - "RequestReceived" - level: Request resources: - group: "" resources: ["serviceaccounts/token"] # Get repsonses can be large; skip them. - level: Request verbs: ["get", "list", "watch"] resources: - group: "" # core - group: "admissionregistration.k8s.io" - group: "apiextensions.k8s.io" - group: "apiregistration.k8s.io" - group: "apps" - group: "authentication.k8s.io" - group: "authorization.k8s.io" - group: "autoscaling" - group: "batch" - group: "certificates.k8s.io" - group: "extensions" - group: "metrics.k8s.io" - group: "networking.k8s.io" - group: "policy" - group: "rbac.authorization.k8s.io" - group: "scheduling.k8s.io" - group: "settings.k8s.io" - group: "storage.k8s.io" omitStages: - "RequestReceived" # Default level for known APIs - level: RequestResponse resources: - group: "" # core - group: "admissionregistration.k8s.io" - group: "apiextensions.k8s.io" - group: "apiregistration.k8s.io" - group: "apps" - group: "authentication.k8s.io" - group: "authorization.k8s.io" - group: "autoscaling" - group: "batch" - group: "certificates.k8s.io" - group: "extensions" - group: "metrics.k8s.io" - group: "networking.k8s.io" - group: "policy" - group: "rbac.authorization.k8s.io" - group: "scheduling.k8s.io" - group: "settings.k8s.io" - group: "storage.k8s.io" omitStages: - "RequestReceived" # Default level for all other requests. - level: Metadata omitStages: - "RequestReceived" `, }, }, InitConfiguration: &bootstrapv1.InitConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ CRISocket: "/var/run/containerd/containerd.sock", KubeletExtraArgs: map[string]string{ "cgroup-driver": "cgroupfs", "eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, }, JoinConfiguration: &bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ CRISocket: "/var/run/containerd/containerd.sock", KubeletExtraArgs: map[string]string{ "cgroup-driver": "cgroupfs", "eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, }, }, Replicas: ptr.Int32(3), Version: "v1.23.12-eks-1-23-6", }, } for _, opt := range opts { opt(kcp) } return kcp } func etcdCluster(opts ...func(*etcdv1.EtcdadmCluster)) *etcdv1.EtcdadmCluster { var etcdCluster *etcdv1.EtcdadmCluster = &etcdv1.EtcdadmCluster{ TypeMeta: metav1.TypeMeta{ Kind: "EtcdadmCluster", APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-etcd", Namespace: constants.EksaSystemNamespace, }, Spec: etcdv1.EtcdadmClusterSpec{ EtcdadmConfigSpec: etcdadmbootstrapv1.EtcdadmConfigSpec{ EtcdadmBuiltin: true, CloudInitConfig: &etcdadmbootstrapv1.CloudInitConfig{ Version: "3.5.4", }, CipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, InfrastructureTemplate: corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "DockerMachineTemplate", Name: "test-etcd-1", Namespace: constants.EksaSystemNamespace, }, Replicas: ptr.Int32(3), }, } for _, opt := range opts { opt(etcdCluster) } return etcdCluster }
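// Illustrative sketch (not part of the repository): the tests above rely on machine
// template names rotating by numeric suffix when the spec changes, for example
// "test-etcd-2" becoming "test-etcd-3". A minimal, standalone version of that
// increment logic might look like the following; the repository's actual
// implementation lives elsewhere and may differ.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// incrementNameSuffix bumps the trailing "-N" counter on a template name,
// mirroring the rotation convention seen in the tests above.
func incrementNameSuffix(name string) (string, error) {
	i := strings.LastIndex(name, "-")
	if i < 0 {
		return "", fmt.Errorf("name %q has no numeric suffix", name)
	}
	n, err := strconv.Atoi(name[i+1:])
	if err != nil {
		return "", fmt.Errorf("parsing suffix of %q: %v", name, err)
	}
	return fmt.Sprintf("%s-%d", name[:i], n+1), nil
}

func main() {
	next, _ := incrementNameSuffix("test-etcd-2")
	fmt.Println(next) // test-etcd-3
}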
740
eks-anywhere
aws
Go
package docker import ( "context" _ "embed" "fmt" "os" "regexp" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/bootstrapper" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/crypto" "github.com/aws/eks-anywhere/pkg/executables" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/providers" "github.com/aws/eks-anywhere/pkg/providers/common" "github.com/aws/eks-anywhere/pkg/registrymirror" "github.com/aws/eks-anywhere/pkg/registrymirror/containerd" "github.com/aws/eks-anywhere/pkg/templater" "github.com/aws/eks-anywhere/pkg/types" releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const ( githubTokenEnvVar = "GITHUB_TOKEN" ) //go:embed config/template-cp.yaml var defaultCAPIConfigCP string //go:embed config/template-md.yaml var defaultCAPIConfigMD string var eksaDockerResourceType = fmt.Sprintf("dockerdatacenterconfigs.%s", v1alpha1.GroupVersion.Group) type ProviderClient interface { GetDockerLBPort(ctx context.Context, clusterName string) (port string, err error) } type provider struct { docker ProviderClient datacenterConfig *v1alpha1.DockerDatacenterConfig providerKubectlClient ProviderKubectlClient templateBuilder *DockerTemplateBuilder } func (p *provider) InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error { return nil } type ProviderKubectlClient interface { GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error) GetMachineDeployment(ctx context.Context, machineDeploymentName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error) GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error) GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*etcdv1.EtcdadmCluster, error) UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error } func NewProvider(providerConfig *v1alpha1.DockerDatacenterConfig, docker ProviderClient, providerKubectlClient ProviderKubectlClient, now types.NowFunc) providers.Provider { return &provider{ docker: docker, datacenterConfig: providerConfig, providerKubectlClient: providerKubectlClient, templateBuilder: &DockerTemplateBuilder{ now: now, }, } } func (p *provider) BootstrapClusterOpts(_ *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) { return []bootstrapper.BootstrapClusterOption{bootstrapper.WithExtraDockerMounts()}, nil } func (p *provider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error { return nil } func (p *provider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error { return nil } func (p *provider) PostBootstrapDeleteForUpgrade(ctx context.Context) error { return nil } func (p *provider) PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error { return nil } func (p *provider) PostWorkloadInit(ctx context.Context, cluster 
*types.Cluster, clusterSpec *cluster.Spec) error { return nil } func (p *provider) Name() string { return constants.DockerProviderName } func (p *provider) DatacenterResourceType() string { return eksaDockerResourceType } func (p *provider) MachineResourceType() string { return "" } func (p *provider) DeleteResources(_ context.Context, _ *cluster.Spec) error { return nil } func (p *provider) PostClusterDeleteValidate(_ context.Context, _ *types.Cluster) error { // No validations return nil } func (p *provider) PostMoveManagementToBootstrap(_ context.Context, _ *types.Cluster) error { // NOOP return nil } func (p *provider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error { logger.Info("Warning: The docker infrastructure provider is meant for local development and testing only") if err := ValidateControlPlaneEndpoint(clusterSpec); err != nil { return err } return nil } func (p *provider) SetupAndValidateDeleteCluster(ctx context.Context, _ *types.Cluster, _ *cluster.Spec) error { return nil } func (p *provider) SetupAndValidateUpgradeCluster(ctx context.Context, _ *types.Cluster, _ *cluster.Spec, _ *cluster.Spec) error { return nil } func (p *provider) UpdateSecrets(ctx context.Context, cluster *types.Cluster, _ *cluster.Spec) error { // Not implemented return nil } // NewDockerTemplateBuilder returns a docker template builder object. func NewDockerTemplateBuilder(now types.NowFunc) *DockerTemplateBuilder { return &DockerTemplateBuilder{ now: now, } } type DockerTemplateBuilder struct { now types.NowFunc } func (d *DockerTemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) { values, err := buildTemplateMapCP(clusterSpec) if err != nil { return nil, fmt.Errorf("error building template map for CP %v", err) } for _, buildOption := range buildOptions { buildOption(values) } bytes, err := templater.Execute(defaultCAPIConfigCP, values) if err != nil { return nil, err } return bytes, nil } func (d *DockerTemplateBuilder) GenerateCAPISpecWorkers(clusterSpec *cluster.Spec, workloadTemplateNames, kubeadmconfigTemplateNames map[string]string) (content []byte, err error) { workerSpecs := make([][]byte, 0, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)) for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { values, err := buildTemplateMapMD(clusterSpec, workerNodeGroupConfiguration) if err != nil { return nil, fmt.Errorf("error building template map for MD %v", err) } values["workloadTemplateName"] = workloadTemplateNames[workerNodeGroupConfiguration.Name] values["workloadkubeadmconfigTemplateName"] = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] bytes, err := templater.Execute(defaultCAPIConfigMD, values) if err != nil { return nil, err } workerSpecs = append(workerSpecs, bytes) } return templater.AppendYamlResources(workerSpecs...), nil } // CAPIWorkersSpecWithInitialNames generates a yaml spec with the CAPI objects representing the worker // nodes for a particular eks-a cluster. It uses default initial names (ended in '-1') for the docker // machine templates and kubeadm config templates. 
func (d *DockerTemplateBuilder) CAPIWorkersSpecWithInitialNames(spec *cluster.Spec) (content []byte, err error) { machineTemplateNames, kubeadmConfigTemplateNames := initialNamesForWorkers(spec) return d.GenerateCAPISpecWorkers(spec, machineTemplateNames, kubeadmConfigTemplateNames) } func initialNamesForWorkers(spec *cluster.Spec) (machineTemplateNames, kubeadmConfigTemplateNames map[string]string) { workerGroupsLen := len(spec.Cluster.Spec.WorkerNodeGroupConfigurations) machineTemplateNames = make(map[string]string, workerGroupsLen) kubeadmConfigTemplateNames = make(map[string]string, workerGroupsLen) for _, workerNodeGroupConfiguration := range spec.Cluster.Spec.WorkerNodeGroupConfigurations { machineTemplateNames[workerNodeGroupConfiguration.Name] = clusterapi.WorkerMachineTemplateName(spec, workerNodeGroupConfiguration) kubeadmConfigTemplateNames[workerNodeGroupConfiguration.Name] = clusterapi.DefaultKubeadmConfigTemplateName(spec, workerNodeGroupConfiguration) } return machineTemplateNames, kubeadmConfigTemplateNames } func kubeletCgroupDriverExtraArgs(kubeVersion v1alpha1.KubernetesVersion) (clusterapi.ExtraArgs, error) { clusterKubeVersionSemver, err := v1alpha1.KubeVersionToSemver(kubeVersion) if err != nil { return nil, fmt.Errorf("converting kubeVersion %v to semver %v", kubeVersion, err) } kube124Semver, err := v1alpha1.KubeVersionToSemver(v1alpha1.Kube124) if err != nil { return nil, fmt.Errorf("error converting kubeVersion %v to semver %v", v1alpha1.Kube124, err) } if clusterKubeVersionSemver.Compare(kube124Semver) != -1 { return clusterapi.CgroupDriverSystemdExtraArgs(), nil } return clusterapi.CgroupDriverCgroupfsExtraArgs(), nil } func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, error) { bundle := clusterSpec.VersionsBundle etcdExtraArgs := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs() sharedExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs() kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)) cgroupDriverArgs, err := kubeletCgroupDriverExtraArgs(clusterSpec.Cluster.Spec.KubernetesVersion) if err != nil { return nil, err } if cgroupDriverArgs != nil { kubeletExtraArgs.Append(cgroupDriverArgs) } apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig). Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)). Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig)). Append(sharedExtraArgs) controllerManagerExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). 
Append(clusterapi.NodeCIDRMaskExtraArgs(&clusterSpec.Cluster.Spec.ClusterNetwork)) values := map[string]interface{}{ "clusterName": clusterSpec.Cluster.Name, "control_plane_replicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, "kubernetesRepository": bundle.KubeDistro.Kubernetes.Repository, "kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag, "etcdRepository": bundle.KubeDistro.Etcd.Repository, "etcdVersion": bundle.KubeDistro.Etcd.Tag, "corednsRepository": bundle.KubeDistro.CoreDNS.Repository, "corednsVersion": bundle.KubeDistro.CoreDNS.Tag, "kindNodeImage": bundle.EksD.KindNode.VersionedImage(), "etcdExtraArgs": etcdExtraArgs.ToPartialYaml(), "etcdCipherSuites": crypto.SecureCipherSuitesString(), "apiserverExtraArgs": apiServerExtraArgs.ToPartialYaml(), "controllermanagerExtraArgs": controllerManagerExtraArgs.ToPartialYaml(), "schedulerExtraArgs": sharedExtraArgs.ToPartialYaml(), "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "externalEtcdVersion": bundle.KubeDistro.EtcdVersion, "eksaSystemNamespace": constants.EksaSystemNamespace, "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, "serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks, "haproxyImageRepository": getHAProxyImageRepo(bundle.Haproxy.Image), "haproxyImageTag": bundle.Haproxy.Image.Tag(), "workerNodeGroupConfigurations": clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations, } if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { values["externalEtcd"] = true values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count } if clusterSpec.AWSIamConfig != nil { values["awsIamAuth"] = true } values["controlPlaneTaints"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints auditPolicy, err := common.GetAuditPolicy(clusterSpec.Cluster.Spec.KubernetesVersion) if err != nil { return nil, err } values["auditPolicy"] = auditPolicy if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil { values, err := populateRegistryMirrorValues(clusterSpec, values) if err != nil { return values, err } } return values, nil } func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration) (map[string]interface{}, error) { bundle := clusterSpec.VersionsBundle kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)). 
Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) cgroupDriverArgs, err := kubeletCgroupDriverExtraArgs(clusterSpec.Cluster.Spec.KubernetesVersion) if err != nil { return nil, err } if cgroupDriverArgs != nil { kubeletExtraArgs.Append(cgroupDriverArgs) } values := map[string]interface{}{ "clusterName": clusterSpec.Cluster.Name, "kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag, "kindNodeImage": bundle.EksD.KindNode.VersionedImage(), "eksaSystemNamespace": constants.EksaSystemNamespace, "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "workerReplicas": *workerNodeGroupConfiguration.Count, "workerNodeGroupName": fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name), "workerNodeGroupTaints": workerNodeGroupConfiguration.Taints, "autoscalingConfig": workerNodeGroupConfiguration.AutoScalingConfiguration, } if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil { values, err := populateRegistryMirrorValues(clusterSpec, values) if err != nil { return values, err } } return values, nil } func NeedsNewControlPlaneTemplate(oldSpec, newSpec *cluster.Spec) bool { return (oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion) || (oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number) } func NeedsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec) bool { if !v1alpha1.WorkerNodeGroupConfigurationSliceTaintsEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) || !v1alpha1.WorkerNodeGroupConfigurationsLabelsMapEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) { return true } return (oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion) || (oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number) } func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration) bool { return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) || !v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels) } func NeedsNewEtcdTemplate(oldSpec, newSpec *cluster.Spec) bool { return (oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion) || (oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number) } func (p *provider) generateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) { clusterName := newClusterSpec.Cluster.Name var controlPlaneTemplateName, workloadTemplateName, kubeadmconfigTemplateName, etcdTemplateName string var needsNewEtcdTemplate bool needsNewControlPlaneTemplate := NeedsNewControlPlaneTemplate(currentSpec, newClusterSpec) if !needsNewControlPlaneTemplate { cp, err := p.providerKubectlClient.GetKubeadmControlPlane(ctx, workloadCluster, workloadCluster.Name, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace)) if err != nil { return nil, nil, err } controlPlaneTemplateName = cp.Spec.MachineTemplate.InfrastructureRef.Name } else { controlPlaneTemplateName = common.CPMachineTemplateName(clusterName, p.templateBuilder.now) } previousWorkerNodeGroupConfigs := cluster.BuildMapForWorkerNodeGroupsByName(currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations) workloadTemplateNames := make(map[string]string, 
len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)) kubeadmconfigTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)) for _, workerNodeGroupConfiguration := range newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { needsNewWorkloadTemplate, err := p.needsNewMachineTemplate(currentSpec, newClusterSpec, workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs) if err != nil { return nil, nil, err } needsNewKubeadmConfigTemplate, err := p.needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs) if err != nil { return nil, nil, err } if !needsNewKubeadmConfigTemplate { mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name) md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace)) if err != nil { return nil, nil, err } kubeadmconfigTemplateName = md.Spec.Template.Spec.Bootstrap.ConfigRef.Name kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName } else { kubeadmconfigTemplateName = common.KubeadmConfigTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now) kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName } if !needsNewWorkloadTemplate { mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name) md, err := p.providerKubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace)) if err != nil { return nil, nil, err } workloadTemplateName = md.Spec.Template.Spec.InfrastructureRef.Name workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName } else { workloadTemplateName = common.WorkerMachineTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now) workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName } } if newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { needsNewEtcdTemplate = NeedsNewEtcdTemplate(currentSpec, newClusterSpec) if !needsNewEtcdTemplate { etcdadmCluster, err := p.providerKubectlClient.GetEtcdadmCluster(ctx, workloadCluster, newClusterSpec.Cluster.Name, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace)) if err != nil { return nil, nil, err } etcdTemplateName = etcdadmCluster.Spec.InfrastructureTemplate.Name } else { /* During a cluster upgrade, etcd machines need to be upgraded first, so that the etcd machines with new spec get created and can be used by controlplane machines as etcd endpoints. KCP rollout should not start until then. As a temporary solution in the absence of static etcd endpoints, we annotate the etcd cluster as "upgrading", so that KCP checks this annotation and does not proceed if etcd cluster is upgrading. The etcdadm controller removes this annotation once the etcd upgrade is complete. 
*/ err = p.providerKubectlClient.UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", newClusterSpec.Cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace)) if err != nil { return nil, nil, err } etcdTemplateName = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now) } } cpOpt := func(values map[string]interface{}) { values["controlPlaneTemplateName"] = controlPlaneTemplateName values["etcdTemplateName"] = etcdTemplateName } controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(newClusterSpec, cpOpt) if err != nil { return nil, nil, err } workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(newClusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames) if err != nil { return nil, nil, err } return controlPlaneSpec, workersSpec, nil } func (p *provider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (bool, error) { if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok { needsNewWorkloadTemplate := NeedsNewWorkloadTemplate(currentSpec, newClusterSpec) return needsNewWorkloadTemplate, nil } return true, nil } func (p *provider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (bool, error) { if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok { existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name] return NeedsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig), nil } return true, nil } func (p *provider) generateCAPISpecForCreate(ctx context.Context, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) { clusterName := clusterSpec.Cluster.Name cpOpt := func(values map[string]interface{}) { values["controlPlaneTemplateName"] = common.CPMachineTemplateName(clusterName, p.templateBuilder.now) values["etcdTemplateName"] = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now) } controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, cpOpt) if err != nil { return nil, nil, err } workloadTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)) kubeadmconfigTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)) for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { workloadTemplateNames[workerNodeGroupConfiguration.Name] = common.WorkerMachineTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now) kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = common.KubeadmConfigTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now) } workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(clusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames) if err != nil { return nil, nil, err } return controlPlaneSpec, workersSpec, nil } func (p *provider) GenerateCAPISpecForCreate(ctx context.Context, _ *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) { controlPlaneSpec, workersSpec, err = 
p.generateCAPISpecForCreate(ctx, clusterSpec) if err != nil { return nil, nil, fmt.Errorf("generating cluster api spec contents: %v", err) } return controlPlaneSpec, workersSpec, nil } func (p *provider) GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) { controlPlaneSpec, workersSpec, err = p.generateCAPISpecForUpgrade(ctx, bootstrapCluster, workloadCluster, currentSpec, newClusterSpec) if err != nil { return nil, nil, fmt.Errorf("generating cluster api spec contents: %v", err) } return controlPlaneSpec, workersSpec, nil } func (p *provider) UpdateKubeConfig(content *[]byte, clusterName string) error { // The Docker provider is for testing only. We don't want to change the interface just for the test ctx := context.Background() if port, err := p.docker.GetDockerLBPort(ctx, clusterName); err != nil { return err } else { getUpdatedKubeConfigContent(content, port) return nil } } // this is required for docker provider. func getUpdatedKubeConfigContent(content *[]byte, dockerLbPort string) { mc := regexp.MustCompile("server:.*") updatedConfig := mc.ReplaceAllString(string(*content), fmt.Sprintf("server: https://127.0.0.1:%s", dockerLbPort)) mc = regexp.MustCompile("certificate-authority-data:.*") updatedConfig = mc.ReplaceAllString(updatedConfig, "insecure-skip-tls-verify: true") updatedContentByte := []byte(updatedConfig) *content = updatedContentByte } func (p *provider) Version(clusterSpec *cluster.Spec) string { return clusterSpec.VersionsBundle.Docker.Version } func (p *provider) EnvMap(_ *cluster.Spec) (map[string]string, error) { envMap := make(map[string]string) if env, ok := os.LookupEnv(githubTokenEnvVar); ok && len(env) > 0 { envMap[githubTokenEnvVar] = env } return envMap, nil } func (p *provider) GetDeployments() map[string][]string { return map[string][]string{ "capd-system": {"capd-controller-manager"}, } } func (p *provider) GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle { bundle := clusterSpec.VersionsBundle folderName := fmt.Sprintf("infrastructure-docker/%s/", bundle.Docker.Version) infraBundle := types.InfrastructureBundle{ FolderName: folderName, Manifests: []releasev1alpha1.Manifest{ bundle.Docker.Components, bundle.Docker.Metadata, bundle.Docker.ClusterTemplate, }, } return &infraBundle } func (p *provider) DatacenterConfig(_ *cluster.Spec) providers.DatacenterConfig { return p.datacenterConfig } func (p *provider) MachineConfigs(_ *cluster.Spec) []providers.MachineConfig { return nil } func (p *provider) ValidateNewSpec(_ context.Context, _ *types.Cluster, _ *cluster.Spec) error { return nil } func (p *provider) ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff { if currentSpec.VersionsBundle.Docker.Version == newSpec.VersionsBundle.Docker.Version { return nil } return &types.ComponentChangeDiff{ ComponentName: constants.DockerProviderName, NewVersion: newSpec.VersionsBundle.Docker.Version, OldVersion: currentSpec.VersionsBundle.Docker.Version, } } func (p *provider) RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpec *cluster.Spec, clusterSpec *cluster.Spec, workloadCluster *types.Cluster, managementCluster *types.Cluster) error { return nil } func (p *provider) UpgradeNeeded(_ context.Context, _, _ *cluster.Spec, _ *types.Cluster) (bool, error) { return false, nil } func (p *provider) RunPostControlPlaneCreation(ctx context.Context, clusterSpec *cluster.Spec, 
cluster *types.Cluster) error {
	return nil
}

// machineDeploymentName returns the CAPI machine deployment name for a worker node group.
func machineDeploymentName(clusterName, nodeGroupName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeGroupName)
}

// getHAProxyImageRepo extracts the kind image repo prefix from the HAProxy image URI.
func getHAProxyImageRepo(haProxyImage releasev1alpha1.Image) string {
	var haproxyImageRepo string

	regexStr := `(?P<HAProxyImageRepoPrefix>public.ecr.aws/[a-z0-9._-]+/kubernetes-sigs/kind)/haproxy`
	regex := regexp.MustCompile(regexStr)
	matches := regex.FindStringSubmatch(haProxyImage.Image())
	if len(matches) > 0 {
		haproxyImageRepo = matches[regex.SubexpIndex("HAProxyImageRepoPrefix")]
	}

	return haproxyImageRepo
}

// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *provider) PreCoreComponentsUpgrade(
	ctx context.Context,
	cluster *types.Cluster,
	clusterSpec *cluster.Spec,
) error {
	return nil
}

// populateRegistryMirrorValues adds the registry mirror template values, reading
// registry credentials via config.ReadCredentials when authentication is enabled.
func populateRegistryMirrorValues(clusterSpec *cluster.Spec, values map[string]interface{}) (map[string]interface{}, error) {
	registryMirror := registrymirror.FromCluster(clusterSpec.Cluster)
	values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap)
	values["mirrorBase"] = registryMirror.BaseRegistry
	values["insecureSkip"] = registryMirror.InsecureSkipVerify
	values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror())
	if len(registryMirror.CACertContent) > 0 {
		values["registryCACert"] = registryMirror.CACertContent
	}

	if registryMirror.Auth {
		values["registryAuth"] = registryMirror.Auth
		username, password, err := config.ReadCredentials()
		if err != nil {
			return values, err
		}
		values["registryUsername"] = username
		values["registryPassword"] = password
	}
	return values, nil
}
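// Illustrative sketch (not part of the repository): demonstrates the regex used by
// getHAProxyImageRepo above in a standalone program. The image URI below is a
// hypothetical value chosen only to exercise the pattern.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as getHAProxyImageRepo; note the dots are unescaped, exactly
	// as in the original, so they match any character.
	re := regexp.MustCompile(`(?P<HAProxyImageRepoPrefix>public.ecr.aws/[a-z0-9._-]+/kubernetes-sigs/kind)/haproxy`)
	// Hypothetical HAProxy image URI.
	image := "public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/haproxy:v0.14.0"
	if m := re.FindStringSubmatch(image); len(m) > 0 {
		// The named capture keeps the repo prefix and drops the "/haproxy" component.
		fmt.Println(m[re.SubexpIndex("HAProxyImageRepoPrefix")])
		// Prints: public.ecr.aws/eks-anywhere/kubernetes-sigs/kind
	}
}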
653
eks-anywhere
aws
Go
package docker_test import ( "context" _ "embed" "fmt" "path" "testing" "time" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/executables" "github.com/aws/eks-anywhere/pkg/providers" "github.com/aws/eks-anywhere/pkg/providers/docker" dockerMocks "github.com/aws/eks-anywhere/pkg/providers/docker/mocks" "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/utils/ptr" releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const testdataDir = "testdata" type dockerTest struct { *WithT dockerClient *dockerMocks.MockProviderClient kubectl *dockerMocks.MockProviderKubectlClient provider providers.Provider } func newTest(t *testing.T) *dockerTest { ctrl := gomock.NewController(t) client := dockerMocks.NewMockProviderClient(ctrl) kubectl := dockerMocks.NewMockProviderKubectlClient(ctrl) return &dockerTest{ WithT: NewWithT(t), dockerClient: client, kubectl: kubectl, provider: docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow), } } func givenClusterSpec(t *testing.T, fileName string) *cluster.Spec { return test.NewFullClusterSpec(t, path.Join(testdataDir, fileName)) } func TestProviderUpdateKubeConfig(t *testing.T) { input := []byte(` apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJ server: https://172.18.0.3:6443 name: capi-quickstart`) expected := ` apiVersion: v1 clusters: - cluster: insecure-skip-tls-verify: true server: https://127.0.0.1:4332 name: capi-quickstart` mockCtrl := gomock.NewController(t) type fields struct { clusterName string } type args struct { content *[]byte clusterName string } tests := []struct { name string fields fields args args want string wantErr bool }{ { name: "Test updates for docker config file", fields: fields{ clusterName: "capi-quickstart", }, args: args{ content: &input, clusterName: "capi-quickstart", }, want: expected, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := dockerMocks.NewMockProviderClient(mockCtrl) client.EXPECT().GetDockerLBPort(gomock.Any(), tt.args.clusterName).Return("4332", nil) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) p := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) if err := p.UpdateKubeConfig(tt.args.content, tt.args.clusterName); (err != nil) != tt.wantErr { t.Errorf("UpdateKubeConfig() error = %v, wantErr %v", err, tt.wantErr) } if string(*tt.args.content) != tt.want { t.Errorf("updateKubeConfigFile() got = %v, want %v", string(*tt.args.content), tt.want) } }) } } func TestProviderGenerateDeploymentFileSuccessUpdateMachineTemplate(t *testing.T) { mockCtrl := gomock.NewController(t) var cpTaints, wnTaints, wnTaints2 []v1.Taint cpTaints = append(cpTaints, v1.Taint{Key: "key1", Value: "val1", Effect: "NoSchedule", TimeAdded: nil}) cpTaints = append(cpTaints, v1.Taint{Key: "key2", Value: "val2", Effect: "PreferNoSchedule", TimeAdded: nil}) cpTaints = append(cpTaints, v1.Taint{Key: "key3", Value: "val3", Effect: "NoExecute", TimeAdded: nil}) wnTaints = append(wnTaints, v1.Taint{Key: "key2", Value: 
"val2", Effect: "PreferNoSchedule", TimeAdded: nil}) wnTaints2 = append(wnTaints2, v1.Taint{Key: "wnTaitns2", Value: "true", Effect: "PreferNoSchedule", TimeAdded: nil}) nodeLabels := map[string]string{"label1": "foo", "label2": "bar"} tests := []struct { testName string clusterSpec *cluster.Spec wantCPFile string wantMDFile string }{ { testName: "valid config", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }), wantCPFile: "testdata/valid_deployment_cp_expected.yaml", wantMDFile: "testdata/valid_deployment_md_expected.yaml", }, { testName: "valid config 1.24", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.24" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }), wantCPFile: "testdata/valid_deployment_cp_expected_124onwards.yaml", wantMDFile: "testdata/valid_deployment_md_expected_124onwards.yaml", }, { testName: "valid config with cp taints", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }), wantCPFile: "testdata/valid_deployment_cp_taints_expected.yaml", wantMDFile: "testdata/valid_deployment_md_expected.yaml", }, { testName: "valid config with md taints", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), Taints: wnTaints, MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} 
}), wantCPFile: "testdata/valid_deployment_cp_taints_expected.yaml", wantMDFile: "testdata/valid_deployment_md_taints_expected.yaml", }, { testName: "valid config multiple worker node groups with machine deployment taints", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), Taints: wnTaints, MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}, {Count: ptr.Int(3), Taints: wnTaints2, MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-1"}} s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} }), wantCPFile: "testdata/valid_deployment_cp_taints_expected.yaml", wantMDFile: "testdata/valid_deployment_multiple_md_taints_expected.yaml", }, { testName: "valid config with node labels", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Labels: nodeLabels, Name: "md-0"}} }), wantCPFile: "testdata/valid_deployment_cp_expected.yaml", wantMDFile: "testdata/valid_deployment_node_labels_md_expected.yaml", }, { testName: "valid config with cp node labels", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.Cluster.Spec.ControlPlaneConfiguration.Labels = nodeLabels s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }), wantCPFile: "testdata/valid_deployment_node_labels_cp_expected.yaml", wantMDFile: "testdata/valid_deployment_md_expected.yaml", }, { testName: "valid config with cidrs and custom resolv.conf", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"10.10.0.0/24", "10.128.0.0/12"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"192.168.0.0/16", "10.10.0.0/16"} s.Cluster.Spec.ClusterNetwork.DNS.ResolvConf = &v1alpha1.ResolvConf{Path: "/etc/my-custom-resolv.conf"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 
3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }), wantCPFile: "testdata/valid_deployment_custom_cidrs_cp_expected.yaml", wantMDFile: "testdata/valid_deployment_custom_cidrs_md_expected.yaml", }, { testName: "with minimal oidc", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} s.OIDCConfig = &v1alpha1.OIDCConfig{ Spec: v1alpha1.OIDCConfigSpec{ ClientId: "my-client-id", IssuerUrl: "https://mydomain.com/issuer", }, } }), wantCPFile: "testdata/capd_valid_minimal_oidc_cp_expected.yaml", wantMDFile: "testdata/capd_valid_minimal_oidc_md_expected.yaml", }, { testName: "with full oidc", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} s.OIDCConfig = &v1alpha1.OIDCConfig{ Spec: v1alpha1.OIDCConfigSpec{ ClientId: "my-client-id", IssuerUrl: "https://mydomain.com/issuer", GroupsClaim: "claim1", GroupsPrefix: "prefix-for-groups", RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{ { Claim: "sub", Value: "test", }, }, UsernameClaim: "username-claim", UsernamePrefix: "username-prefix", }, } }), wantCPFile: "testdata/capd_valid_full_oidc_cp_expected.yaml", wantMDFile: "testdata/capd_valid_full_oidc_md_expected.yaml", }, { testName: "valid autoscaling config", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0", AutoScalingConfiguration: &v1alpha1.AutoScalingConfiguration{MinCount: 3, MaxCount: 5}}} }), wantCPFile: "testdata/valid_deployment_cp_expected.yaml", wantMDFile: "testdata/valid_autoscaler_deployment_md_expected.yaml", }, } for _, tt := range tests { t.Run(tt.testName, func(t *testing.T) { ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) p := 
docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) cluster := &types.Cluster{ Name: "test", } currentSpec := tt.clusterSpec.DeepCopy() tt.clusterSpec.Bundles.Spec.Number = 2 bootstrapCluster := &types.Cluster{ Name: "bootstrap-test", } for _, nodeGroup := range tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { md := &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ ConfigRef: &v1.ObjectReference{ Name: fmt.Sprintf("%s-%s-template-1234567890000", tt.clusterSpec.Cluster.Name, nodeGroup.Name), }, }, }, }, }, } machineDeploymentName := fmt.Sprintf("%s-%s", tt.clusterSpec.Cluster.Name, nodeGroup.Name) kubectl.EXPECT().GetMachineDeployment(ctx, machineDeploymentName, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(md, nil) } kubectl.EXPECT().UpdateAnnotation(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", tt.clusterSpec.Cluster.Name), map[string]string{etcdv1.UpgradeInProgressAnnotation: "true"}, gomock.Any(), gomock.Any()) cpContent, mdContent, err := p.GenerateCAPISpecForUpgrade(ctx, bootstrapCluster, cluster, currentSpec, tt.clusterSpec) if err != nil { t.Fatalf("provider.GenerateCAPISpecForUpgrade() error = %v, wantErr nil", err) } test.AssertContentToFile(t, string(cpContent), tt.wantCPFile) test.AssertContentToFile(t, string(mdContent), tt.wantMDFile) }) } } func TestProviderGenerateDeploymentFileSuccessUpdateKubeadmConfigTemplate(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) clusterSpec := test.NewClusterSpec() var cpTaints, wnTaints []v1.Taint cpTaints = append(cpTaints, v1.Taint{Key: "key1", Value: "val1", Effect: "NoSchedule", TimeAdded: nil}) cpTaints = append(cpTaints, v1.Taint{Key: "key2", Value: "val2", Effect: "PreferNoSchedule", TimeAdded: nil}) cpTaints = append(cpTaints, v1.Taint{Key: "key3", Value: "val3", Effect: "NoExecute", TimeAdded: nil}) wnTaints = append(wnTaints, v1.Taint{Key: "key2", Value: "val2", Effect: "PreferNoSchedule", TimeAdded: nil}) clusterSpec.Cluster.Name = "test-cluster" clusterSpec.Cluster.Spec.KubernetesVersion = "1.19" clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count = 3 clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints = cpTaints clusterSpec.VersionsBundle = versionsBundle clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} p := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) cluster := &types.Cluster{ Name: "test-cluster", } currentSpec := clusterSpec.DeepCopy() clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Taints = wnTaints bootstrapCluster := &types.Cluster{ Name: "bootstrap-test", } cp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: 
v1.ObjectReference{ Name: "test-cluster-control-plane-template-1234567890000", }, }, }, } etcdadm := &etcdv1.EtcdadmCluster{ Spec: etcdv1.EtcdadmClusterSpec{ InfrastructureTemplate: v1.ObjectReference{ Name: "test-cluster-etcd-template-1234567890000", }, }, } kubectl.EXPECT().GetKubeadmControlPlane(ctx, cluster, cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(cp, nil) kubectl.EXPECT().GetEtcdadmCluster(ctx, cluster, cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(etcdadm, nil) cpContent, mdContent, err := p.GenerateCAPISpecForUpgrade(ctx, bootstrapCluster, cluster, currentSpec, clusterSpec) if err != nil { t.Fatalf("provider.GenerateCAPISpecForUpgrade() error = %v, wantErr nil", err) } test.AssertContentToFile(t, string(cpContent), "testdata/valid_deployment_cp_taints_expected.yaml") test.AssertContentToFile(t, string(mdContent), "testdata/valid_deployment_md_taints_expected.yaml") } func TestProviderGenerateDeploymentFileSuccessNotUpdateMachineTemplate(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) clusterSpec := test.NewClusterSpec() clusterSpec.Cluster.Spec.KubernetesVersion = v1alpha1.Kube122 clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(0), MachineGroupRef: &v1alpha1.Ref{Name: "fluxTestCluster"}, Name: "md-0"}} p := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) cluster := &types.Cluster{ Name: "test", } currentSpec := clusterSpec.DeepCopy() bootstrapCluster := &types.Cluster{ Name: "bootstrap-test", } cp := &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: v1.ObjectReference{ Name: "test-control-plane-template-original", }, }, }, } md := &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ ConfigRef: &v1.ObjectReference{ Name: "test-md-0-original", }, }, InfrastructureRef: v1.ObjectReference{ Name: "test-md-0-original", }, }, }, }, } machineDeploymentName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name) kubectl.EXPECT().GetKubeadmControlPlane(ctx, cluster, cluster.Name, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(cp, nil) kubectl.EXPECT().GetMachineDeployment(ctx, machineDeploymentName, gomock.AssignableToTypeOf(executables.WithCluster(bootstrapCluster))).Return(md, nil).Times(2) cpContent, mdContent, err := p.GenerateCAPISpecForUpgrade(ctx, bootstrapCluster, cluster, currentSpec, clusterSpec) if err != nil { t.Fatalf("provider.GenerateCAPISpecForUpgrade() error = %v, wantErr nil", err) } test.AssertContentToFile(t, string(cpContent), "testdata/no_machinetemplate_update_cp_expected.yaml") test.AssertContentToFile(t, string(mdContent), "testdata/no_machinetemplate_update_md_expected.yaml") } func TestGetInfrastructureBundleSuccess(t *testing.T) { mockCtrl := gomock.NewController(t) tests := []struct { testName string clusterSpec *cluster.Spec }{ { 
testName: "create overrides layer", clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.VersionsBundle = versionsBundle }), }, } for _, tt := range tests { t.Run(tt.testName, func(t *testing.T) { client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) p := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) infraBundle := p.GetInfrastructureBundle(tt.clusterSpec) if infraBundle == nil { t.Fatalf("provider.GetInfrastructureBundle() should have an infrastructure bundle") } assert.Equal(t, "infrastructure-docker/v0.3.19/", infraBundle.FolderName, "Incorrect folder name") assert.Equal(t, len(infraBundle.Manifests), 3, "Wrong number of files in the infrastructure bundle") wantManifests := []releasev1alpha1.Manifest{ versionsBundle.Docker.Components, versionsBundle.Docker.Metadata, versionsBundle.Docker.ClusterTemplate, } assert.ElementsMatch(t, infraBundle.Manifests, wantManifests, "Incorrect manifests") }) } } var versionsBundle = &cluster.VersionsBundle{ KubeDistro: &cluster.KubeDistro{ Kubernetes: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/kubernetes", Tag: "v1.19.6-eks-1-19-2", }, CoreDNS: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/coredns", Tag: "v1.8.0-eks-1-19-2", }, Etcd: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/etcd-io", Tag: "v3.4.14-eks-1-19-2", }, EtcdVersion: "3.4.14", }, VersionsBundle: &releasev1alpha1.VersionsBundle{ EksD: releasev1alpha1.EksDRelease{ KindNode: releasev1alpha1.Image{ Description: "kind/node container image", Name: "kind/node", URI: "public.ecr.aws/eks-distro/kubernetes-sigs/kind/node:v1.18.16-eks-1-18-4-216edda697a37f8bf16651af6c23b7e2bb7ef42f-62681885fe3a97ee4f2b110cc277e084e71230fa", }, }, Docker: releasev1alpha1.DockerBundle{ Version: "v0.3.19", Manager: releasev1alpha1.Image{ URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/cluster-api/capd-manager:v0.3.15-6bdb9fc78bb926135843c58ec8b77b54d8f2c82c", }, KubeProxy: releasev1alpha1.Image{ URI: "public.ecr.aws/l0g8r8j6/brancz/kube-rbac-proxy:v0.8.0-25df7d96779e2a305a22c6e3f9425c3465a77244", }, Components: releasev1alpha1.Manifest{ URI: "embed:///config/clusterctl/overrides/infrastructure-docker/v0.3.19/infrastructure-components-development.yaml", }, ClusterTemplate: releasev1alpha1.Manifest{ URI: "embed:///config/clusterctl/overrides/infrastructure-docker/v0.3.19/cluster-template-development.yaml", }, Metadata: releasev1alpha1.Manifest{ URI: "embed:///config/clusterctl/overrides/infrastructure-docker/v0.3.19/metadata.yaml", }, }, Haproxy: releasev1alpha1.HaproxyBundle{ Image: releasev1alpha1.Image{ URI: "public.ecr.aws/l0g8r8j6/kubernetes-sigs/kind/haproxy:v0.11.1-eks-a-v0.0.0-dev-build.1464", }, }, }, } func TestChangeDiffNoChange(t *testing.T) { tt := newTest(t) clusterSpec := test.NewClusterSpec() assert.Nil(t, tt.provider.ChangeDiff(clusterSpec, clusterSpec)) } func TestChangeDiffWithChange(t *testing.T) { tt := newTest(t) clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.VersionsBundle.Docker.Version = "v0.3.18" }) newClusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.VersionsBundle.Docker.Version = "v0.3.19" }) wantDiff := &types.ComponentChangeDiff{ ComponentName: "docker", NewVersion: "v0.3.19", OldVersion: "v0.3.18", } tt.Expect(tt.provider.ChangeDiff(clusterSpec, newClusterSpec)).To(Equal(wantDiff)) } func TestProviderGenerateCAPISpecForCreateWithPodIAMConfig(t *testing.T) { 
mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterObj := &types.Cluster{ Name: "test-cluster", } clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 1 s.VersionsBundle = versionsBundle s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}}} }) clusterSpec.Cluster.Spec.PodIAMConfig = &v1alpha1.PodIAMConfig{ServiceAccountIssuer: "https://test"} if provider == nil { t.Fatalf("provider object is nil") } err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) if err != nil { t.Fatalf("failed to setup and validate: %v", err) } cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) if err != nil { t.Fatalf("failed to generate cluster api spec contents: %v", err) } test.AssertContentToFile(t, string(cp), "testdata/valid_deployment_cp_pod_iam_expected.yaml") } func TestProviderGenerateCAPISpecForCreateWithStackedEtcd(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterObj := &types.Cluster{ Name: "test-cluster", } clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 1 s.VersionsBundle = versionsBundle s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}}} }) if provider == nil { t.Fatalf("provider object is nil") } err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) if err != nil { t.Fatalf("failed to setup and validate: %v", err) } cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) if err != nil { t.Fatalf("failed to generate cluster api spec contents: %v", err) } test.AssertContentToFile(t, string(cp), "testdata/valid_deployment_cp_stacked_etcd_expected.yaml") } func TestDockerTemplateBuilderGenerateCAPISpecControlPlane(t *testing.T) { type args struct { clusterSpec *cluster.Spec buildOptions []providers.BuildMapOption } tests := []struct { name string args args wantContent []byte wantErr error }{ { name: "kube 122 test", args: args{ clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.22" }), buildOptions: nil, }, wantErr: nil, }, { name: "kube version not specified", args: args{ clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" }), buildOptions: nil, }, 
wantErr: fmt.Errorf("error building template map for CP "), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) builder := docker.NewDockerTemplateBuilder(time.Now) gotContent, err := builder.GenerateCAPISpecControlPlane(tt.args.clusterSpec, tt.args.buildOptions...) if err != tt.wantErr && !assert.Contains(t, err.Error(), tt.wantErr.Error()) { t.Errorf("Got DockerTemplateBuilder.GenerateCAPISpecControlPlane() error = %v, wantErr %v", err, tt.wantErr) return } if err == nil { g.Expect(gotContent).NotTo(BeEmpty()) } }) } } func TestProviderGenerateDeploymentFileForSingleNodeCluster(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterObj := &types.Cluster{Name: "single-node"} clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "single-node" s.Cluster.Spec.KubernetesVersion = "1.21" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 1 s.VersionsBundle = versionsBundle s.Cluster.Spec.WorkerNodeGroupConfigurations = nil }) if provider == nil { t.Fatalf("provider object is nil") } err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) if err != nil { t.Fatalf("failed to setup and validate: %v", err) } cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) if err != nil { t.Fatalf("failed to generate cluster api spec contents: %v", err) } test.AssertContentToFile(t, string(cp), "testdata/expected_results_cluster_docker_cp_single_node.yaml") } func TestDockerTemplateBuilderGenerateCAPISpecWorkers(t *testing.T) { type args struct { clusterSpec *cluster.Spec } tests := []struct { name string args args wantContent []byte wantErr error }{ { name: "kube version not specified", args: args{ clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" }), }, wantErr: fmt.Errorf("error building template map for MD "), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) builder := docker.NewDockerTemplateBuilder(time.Now) gotContent, err := builder.GenerateCAPISpecWorkers(tt.args.clusterSpec, nil, nil) if err != tt.wantErr && !assert.Contains(t, err.Error(), tt.wantErr.Error()) { t.Errorf("Got DockerTemplateBuilder.GenerateCAPISpecWorkers() error = %v, wantErr %v", err, tt.wantErr) return } if err == nil { g.Expect(gotContent).NotTo(BeEmpty()) } }) } } func TestInvalidDockerTemplateWithControlplaneEndpoint(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.Cluster.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "test-ip"} s.Cluster.Spec.ExternalEtcdConfiguration 
= &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }) wantErr := fmt.Errorf("specifying endpoint host configuration in Cluster is not supported") err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) if err == nil || err.Error() != wantErr.Error() { t.Fatalf("err %v, wantErr %v", err, wantErr) } } func TestDockerGenerateDeploymentFileWithMirrorConfig(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterObj := &types.Cluster{Name: "test"} clusterSpec := givenClusterSpec(t, "cluster_mirror_config.yaml") if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil { t.Fatalf("failed to setup and validate: %v", err) } cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) if err != nil { t.Fatalf("failed to generate cluster api spec contents: %v", err) } test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_config_cp.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_config_md.yaml") } func TestDockerGenerateDeploymentFileWithMirrorAndCertConfig(t *testing.T) { mockCtrl := gomock.NewController(t) ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterObj := &types.Cluster{Name: "test"} clusterSpec := givenClusterSpec(t, "cluster_mirror_with_cert_config.yaml") if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil { t.Fatalf("failed to setup and validate: %v", err) } cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) if err != nil { t.Fatalf("failed to generate cluster api spec contents: %v", err) } test.AssertContentToFile(t, string(cp), "testdata/expected_results_mirror_with_cert_config_cp.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_with_cert_config_md.yaml") } func TestDockerGenerateDeploymentFileWithMirrorAndAuthConfig(t *testing.T) { mockCtrl := gomock.NewController(t) t.Setenv("REGISTRY_USERNAME", "username") t.Setenv("REGISTRY_PASSWORD", "password") ctx := context.Background() client := dockerMocks.NewMockProviderClient(mockCtrl) kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl) provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow) clusterObj := &types.Cluster{Name: "test"} clusterSpec := givenClusterSpec(t, "cluster_mirror_with_auth_config.yaml") if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil { t.Fatalf("failed to setup and validate: %v", err) } cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), clusterObj, clusterSpec) if err != nil { t.Fatalf("failed to generate cluster api spec contents: %v", err) } test.AssertContentToFile(t, string(cp),
"testdata/expected_results_mirror_with_auth_config_cp.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_with_auth_config_md.yaml") }
902
eks-anywhere
aws
Go
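The expected-output files above all embed names like test-cluster-md-0-template-1234567890000, which only works because the provider is constructed with test.FakeNow instead of time.Now. Below is a minimal sketch of that injected-clock pattern, assuming a pinned timestamp of 1234567890 seconds; nowFunc, fakeNow, and templateName are illustrative stand-ins, not the provider's actual helpers.

package main

import (
	"fmt"
	"time"
)

// nowFunc mirrors the injected-clock pattern: the provider takes a clock
// function so tests can substitute a deterministic time source.
type nowFunc func() time.Time

// fakeNow pins the clock to 1234567890 seconds, the value the golden files
// above embed in template names. Illustrative stand-in for test.FakeNow.
func fakeNow() time.Time {
	return time.Unix(1234567890, 0)
}

// templateName is a hypothetical helper showing how a generated name becomes
// deterministic once the clock is fixed.
func templateName(clusterName, groupName string, now nowFunc) string {
	return fmt.Sprintf("%s-%s-template-%d", clusterName, groupName, now().Unix()*1000)
}

func main() {
	fmt.Println(templateName("test-cluster", "md-0", fakeNow))
	// Output: test-cluster-md-0-template-1234567890000
}

Pinning the clock is what lets the GenerateCAPISpecForUpgrade tests compare generated manifests byte-for-byte against the golden files in testdata.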
package docker import ( "context" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/api/equality" dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" ) // GetMachineTemplate gets a DockerMachineTemplate object using the provided client // If the object doesn't exist, it returns a NotFound error. func GetMachineTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*dockerv1.DockerMachineTemplate, error) { m := &dockerv1.DockerMachineTemplate{} if err := client.Get(ctx, name, namespace, m); err != nil { return nil, errors.Wrap(err, "reading dockerMachineTemplate") } return m, nil } // MachineTemplateEqual returns a boolean indicating whether or not the provided DockerMachineTemplates are equal. func MachineTemplateEqual(new, old *dockerv1.DockerMachineTemplate) bool { return equality.Semantic.DeepDerivative(new.Spec, old.Spec) }
28
eks-anywhere
aws
Go
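MachineTemplateEqual above compares only in one direction, and DeepDerivative has asymmetric semantics worth spelling out: zero-valued fields in the first argument are skipped. The consequence is that dropping an optional field from the new spec still reads as "equal" one way but not the other. A runnable sketch with a toy struct, not the real DockerMachineTemplateSpec:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/equality"
)

// toySpec stands in for a machine template spec with one optional field.
type toySpec struct {
	CustomImage string
	ExtraMount  string
}

func main() {
	oldSpec := toySpec{CustomImage: "kind:v1.23", ExtraMount: "/var"}
	newSpec := toySpec{CustomImage: "kind:v1.23"} // optional field removed

	// DeepDerivative ignores zero-valued fields in its FIRST argument only.
	fmt.Println(equality.Semantic.DeepDerivative(newSpec, oldSpec)) // true
	fmt.Println(equality.Semantic.DeepDerivative(oldSpec, newSpec)) // false
}

Comparing in both directions closes that gap, at the cost of flagging server-defaulted fields as changes; the one-directional form shown above accepts that trade-off.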
package docker_test import ( "context" "testing" . "github.com/onsi/gomega" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/providers/docker" ) func TestMachineTemplateEqualDifferentNames(t *testing.T) { g := NewWithT(t) machineTemplate := dockerMachineTemplate("test-machine-1") otherMachineTemplate := machineTemplate.DeepCopy() otherMachineTemplate.Name = "test-machine-2" isEqual := docker.MachineTemplateEqual(machineTemplate, otherMachineTemplate) g.Expect(isEqual).To(BeTrue()) } func TestMachineTemplateEqualDifferentCustomImages(t *testing.T) { g := NewWithT(t) machineTemplate := dockerMachineTemplate("test-machine-1") otherMachineTemplate := machineTemplate.DeepCopy() otherMachineTemplate.Spec.Template.Spec.CustomImage = "other-custom-image" g.Expect(docker.MachineTemplateEqual(machineTemplate, otherMachineTemplate)).To(BeFalse()) } func TestGetMachineTemplateNoError(t *testing.T) { g := NewWithT(t) ctx := context.Background() machineTemplateName := "test-machine-1" machineTemplate := dockerMachineTemplate(machineTemplateName) client := test.NewFakeKubeClient( machineTemplate, ) m, err := docker.GetMachineTemplate(ctx, client, machineTemplateName, constants.EksaSystemNamespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(m).NotTo(BeNil()) } func TestGetMachineTemplateErrorFromClient(t *testing.T) { g := NewWithT(t) ctx := context.Background() client := test.NewFakeKubeClient() _, err := docker.GetMachineTemplate(ctx, client, "test-machine-1", constants.EksaSystemNamespace) g.Expect(err).To(MatchError(ContainSubstring("reading dockerMachineTemplate"))) }
57
eks-anywhere
aws
Go
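The error-path test above only asserts on the wrapped message. If a caller wants to treat "template not found" as "generate a new one" rather than as a failure, checking the raw error before wrapping avoids depending on error-unwrapping behavior. The wrapper below is hypothetical, sketched under the assumption that the fake and real clients both surface standard apimachinery NotFound errors:

package docker

import (
	"context"

	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)

// getMachineTemplateIfExists is a hypothetical wrapper: a nil, nil return
// means "no current template exists, generate a fresh one", while any other
// error is wrapped exactly as GetMachineTemplate does.
func getMachineTemplateIfExists(ctx context.Context, client kubernetes.Client, name, namespace string) (*dockerv1.DockerMachineTemplate, error) {
	m := &dockerv1.DockerMachineTemplate{}
	if err := client.Get(ctx, name, namespace, m); err != nil {
		if apierrors.IsNotFound(err) {
			return nil, nil
		}
		return nil, errors.Wrap(err, "reading dockerMachineTemplate")
	}
	return m, nil
}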
package docker import ( "fmt" "github.com/aws/eks-anywhere/pkg/cluster" ) // ValidateControlPlaneEndpoint - checks to see if endpoint host configuration is specified for docker cluster and returns an error if true. func ValidateControlPlaneEndpoint(clusterSpec *cluster.Spec) error { if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint != nil { return fmt.Errorf("specifying endpoint host configuration in Cluster is not supported") } return nil }
16
eks-anywhere
aws
Go
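A kind-based docker cluster gets its endpoint from the load balancer (the GetDockerLBPort flow in the tests above), so any user-supplied host is rejected outright. Here is a self-contained sketch of the same guard; validateEndpoint is a hypothetical stand-in that operates directly on the API type rather than the full cluster.Spec:

package main

import (
	"fmt"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

// validateEndpoint rejects a user-supplied control plane endpoint, mirroring
// the ValidateControlPlaneEndpoint check above in isolation.
func validateEndpoint(cp anywherev1.ControlPlaneConfiguration) error {
	if cp.Endpoint != nil {
		return fmt.Errorf("specifying endpoint host configuration in Cluster is not supported")
	}
	return nil
}

func main() {
	cp := anywherev1.ControlPlaneConfiguration{Endpoint: &anywherev1.Endpoint{Host: "test-ip"}}
	fmt.Println(validateEndpoint(cp))
	// specifying endpoint host configuration in Cluster is not supported
}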
package docker_test import ( "fmt" "testing" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/providers/docker" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) func TestValidateControlplaneEndpoint(t *testing.T) { clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster.Name = "test-cluster" s.Cluster.Spec.KubernetesVersion = "1.19" s.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks = []string{"192.168.0.0/16"} s.Cluster.Spec.ClusterNetwork.Services.CidrBlocks = []string{"10.128.0.0/12"} s.Cluster.Spec.ControlPlaneConfiguration.Count = 3 s.Cluster.Spec.ControlPlaneConfiguration.Endpoint = &v1alpha1.Endpoint{Host: "test-ip"} s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3} s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-cluster"}, Name: "md-0"}} }) wantErr := fmt.Errorf("specifying endpoint host configuration in Cluster is not supported") err := docker.ValidateControlPlaneEndpoint(clusterSpec) if err == nil || err.Error() != wantErr.Error() { t.Errorf("Got err %v, wanted %v", err, wantErr) } }
32
eks-anywhere
aws
Go
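The test above builds its spec with test.NewClusterSpec and a mutator closure, the same shape as every spec setup in this package. A generic sketch of that mutator pattern follows, with illustrative names rather than the test package's internals:

package main

import "fmt"

// spec is a toy stand-in for cluster.Spec.
type spec struct {
	Name    string
	Version string
}

// newSpec applies each mutator to a defaulted value, the pattern
// test.NewClusterSpec appears to follow.
func newSpec(opts ...func(*spec)) *spec {
	s := &spec{Name: "default", Version: "1.19"}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

func main() {
	s := newSpec(func(s *spec) { s.Version = "1.24" })
	fmt.Println(s.Name, s.Version) // default 1.24
}

The payoff is that each test states only the fields it cares about while the helper owns the defaults.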
package docker import ( "context" "time" "github.com/go-logr/logr" "github.com/pkg/errors" dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" capiyaml "github.com/aws/eks-anywhere/pkg/clusterapi/yaml" "github.com/aws/eks-anywhere/pkg/yamlutil" ) type ( // Workers represents the docker specific CAPI spec for worker nodes. Workers = clusterapi.Workers[*dockerv1.DockerMachineTemplate] workersBuilder = capiyaml.WorkersBuilder[*dockerv1.DockerMachineTemplate] ) // WorkersSpec generates a Docker specific CAPI spec for an eks-a cluster worker nodes. // It talks to the cluster with a client to detect changes in immutable objects and generates new // names for them. func WorkersSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*Workers, error) { templateBuilder := NewDockerTemplateBuilder(time.Now) workersYaml, err := templateBuilder.CAPIWorkersSpecWithInitialNames(spec) if err != nil { return nil, err } parser, builder, err := newWorkersParserAndBuilder(logger) if err != nil { return nil, err } if err = parser.Parse(workersYaml, builder); err != nil { return nil, errors.Wrap(err, "parsing docker CAPI workers yaml") } workers := builder.Workers if err = workers.UpdateImmutableObjectNames(ctx, client, GetMachineTemplate, MachineTemplateEqual); err != nil { return nil, errors.Wrap(err, "updating docker worker immutable object names") } return workers, nil } func newWorkersParserAndBuilder(logger logr.Logger) (*yamlutil.Parser, *workersBuilder, error) { parser, builder, err := capiyaml.NewWorkersParserAndBuilder( logger, machineTemplateMapping(), ) if err != nil { return nil, nil, errors.Wrap(err, "building docker workers parser and builder") } return parser, builder, nil } func machineTemplateMapping() yamlutil.Mapping[*dockerv1.DockerMachineTemplate] { return yamlutil.NewMapping( "DockerMachineTemplate", func() *dockerv1.DockerMachineTemplate { return &dockerv1.DockerMachineTemplate{} }, ) }
71
eks-anywhere
aws
Go
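UpdateImmutableObjectNames is where the "-1", "-2" suffixes in the tests below come from: a template is never edited in place; when the desired spec differs from the stored one, the trailing counter is bumped so CAPI sees a brand-new object and rolls machines onto it. A sketch of just the renaming step, as an illustration of the idea rather than the clusterapi package's actual code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// incrementName bumps the trailing counter of an immutable object's name,
// e.g. "test-md-0-1" -> "test-md-0-2". Hypothetical helper.
func incrementName(name string) string {
	i := strings.LastIndex(name, "-")
	if i < 0 {
		return name + "-1"
	}
	n, err := strconv.Atoi(name[i+1:])
	if err != nil {
		return name + "-1"
	}
	return fmt.Sprintf("%s-%d", name[:i], n+1)
}

func main() {
	fmt.Println(incrementName("test-md-0-1")) // test-md-0-2
}

When the specs compare equal via MachineTemplateEqual, the old name is kept and no rollout happens.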
package docker_test import ( "context" "testing" "time" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "github.com/aws/eks-anywhere/internal/test" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/controller/clientutil" "github.com/aws/eks-anywhere/pkg/providers/docker" "github.com/aws/eks-anywhere/pkg/utils/ptr" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) func TestWorkersSpecNewCluster(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() client := test.NewFakeKubeClient() workers, err := docker.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(2)) g.Expect(workers.Groups).To(ConsistOf( clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: dockerMachineTemplate("test-md-0-1"), }, clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate( func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" }, ), MachineDeployment: machineDeployment( func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" }, ), ProviderMachineTemplate: dockerMachineTemplate("test-md-1-1"), }, )) } func TestWorkersSpecUpgradeCluster(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() currentGroup1 := clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: dockerMachineTemplate("test-md-0-1"), } currentGroup2 := clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate( func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" }, ), MachineDeployment: machineDeployment( func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" }, ), ProviderMachineTemplate: dockerMachineTemplate("test-md-1-1"), } // Always make copies before passing them to the client, since it modifies the API objects // (the ResourceVersion, for example) expectedGroup1 := currentGroup1.DeepCopy() expectedGroup2 := currentGroup2.DeepCopy() objs := make([]kubernetes.Object, 0, 6) objs = append(objs, currentGroup1.Objects()...) objs = append(objs, currentGroup2.Objects()...) client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(objs)...)
// This will cause a change in the kubeadmconfigtemplate, which we also treat as immutable spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Taints = []corev1.Taint{ { Key: "a", Value: "accept", Effect: corev1.TaintEffectNoSchedule, }, } expectedGroup1.KubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{ { Key: "a", Value: "accept", Effect: corev1.TaintEffectNoSchedule, }, } expectedGroup1.KubeadmConfigTemplate.Name = "test-md-0-2" expectedGroup1.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2" // This will cause a change in the docker machine templates, which are immutable spec.VersionsBundle.EksD.KindNode = releasev1.Image{ URI: "my-new-kind-image:tag", } expectedGroup1.ProviderMachineTemplate.Spec.Template.Spec.CustomImage = "my-new-kind-image:tag" expectedGroup1.ProviderMachineTemplate.Name = "test-md-0-2" expectedGroup1.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name = "test-md-0-2" expectedGroup2.ProviderMachineTemplate.Spec.Template.Spec.CustomImage = "my-new-kind-image:tag" expectedGroup2.ProviderMachineTemplate.Name = "test-md-1-2" expectedGroup2.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-2" workers, err := docker.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(2)) g.Expect(workers.Groups).To(ConsistOf(*expectedGroup1, *expectedGroup2)) } func TestWorkersSpecUpgradeClusterRemoveLabels(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() kct := kubeadmConfigTemplate() kct.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{ "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "cgroup-driver": "cgroupfs", "node-labels": "foo=bar", "eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%", } currentGroup1 := clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kct, MachineDeployment: machineDeployment(), ProviderMachineTemplate: dockerMachineTemplate("test-md-0-1"), } currentGroup2 := clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate( func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" }, ), MachineDeployment: machineDeployment( func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" }, ), ProviderMachineTemplate: dockerMachineTemplate("test-md-1-1"), } // Always make copies before passing them to the client, since it modifies the API objects // (the ResourceVersion, for example) expectedGroup1 := currentGroup1.DeepCopy() expectedGroup2 := currentGroup2.DeepCopy() objs := make([]kubernetes.Object, 0, 6) objs = append(objs, currentGroup1.Objects()...) client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(objs)...)
// This will cause a change in the kubeadmconfigtemplate, which we also treat as immutable spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Labels = map[string]string{} expectedGroup1.KubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{ "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "cgroup-driver": "cgroupfs", "eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%", } expectedGroup1.KubeadmConfigTemplate.Name = "test-md-0-2" expectedGroup1.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-0-2" workers, err := docker.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(2)) g.Expect(workers.Groups).To(ConsistOf(*expectedGroup1, *expectedGroup2)) } func TestWorkersSpecNoMachineTemplateChanges(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() currentGroup1 := clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate(), MachineDeployment: machineDeployment(), ProviderMachineTemplate: dockerMachineTemplate("test-md-0-1"), } currentGroup2 := clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate( func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" }, ), MachineDeployment: machineDeployment( func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" }, ), ProviderMachineTemplate: dockerMachineTemplate("test-md-1-1"), } // Always make copies before passing them to the client, since it modifies the API objects // (the ResourceVersion, for example) expectedGroup1 := currentGroup1.DeepCopy() expectedGroup2 := currentGroup2.DeepCopy() // This mimics what would happen if the objects were returned by a real API server // It helps make sure that the immutable object comparison is able to deal with these // kinds of changes. currentGroup1.ProviderMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now()) currentGroup2.ProviderMachineTemplate.CreationTimestamp = metav1.NewTime(time.Now()) // This is testing defaults. It's possible that some default logic will set items that are not set in our machine templates. // We need to take this into consideration when checking for equality. currentGroup1.ProviderMachineTemplate.Spec.Template.Spec.ProviderID = ptr.String("default-id") currentGroup2.ProviderMachineTemplate.Spec.Template.Spec.ProviderID = ptr.String("default-id") objs := make([]kubernetes.Object, 0, 6) objs = append(objs, currentGroup1.Objects()...) objs = append(objs, currentGroup2.Objects()...) client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(objs)...)
workers, err := docker.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(2)) g.Expect(workers.Groups).To(ConsistOf(*expectedGroup1, *expectedGroup2)) } func TestWorkersSpecErrorFromClient(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() client := test.NewFakeKubeClientAlwaysError() _, err := docker.WorkersSpec(ctx, logger, client, spec) g.Expect(err).To(MatchError(ContainSubstring("updating docker worker immutable object names"))) } func TestWorkersSpecMachineTemplateNotFound(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() ctx := context.Background() spec := testClusterSpec() client := test.NewFakeKubeClient(machineDeployment()) _, err := docker.WorkersSpec(ctx, logger, client, spec) g.Expect(err).NotTo(HaveOccurred()) } func TestWorkersSpecRegistryMirrorConfiguration(t *testing.T) { logger := test.NewNullLogger() ctx := context.Background() client := test.NewFakeKubeClient() tests := []struct { name string mirrorConfig *anywherev1.RegistryMirrorConfiguration files []bootstrapv1.File }{ { name: "insecure skip verify", mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabled(), files: test.RegistryMirrorConfigFilesInsecureSkipVerify(), }, { name: "insecure skip verify with ca cert", mirrorConfig: test.RegistryMirrorInsecureSkipVerifyEnabledAndCACert(), files: test.RegistryMirrorConfigFilesInsecureSkipVerifyAndCACert(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { spec := testClusterSpec() spec.Cluster.Spec.RegistryMirrorConfiguration = tt.mirrorConfig workers, err := docker.WorkersSpec(ctx, logger, client, spec) g := NewWithT(t) g.Expect(err).NotTo(HaveOccurred()) g.Expect(workers).NotTo(BeNil()) g.Expect(workers.Groups).To(HaveLen(2)) g.Expect(workers.Groups).To(ConsistOf( clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate(func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...) kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, test.RegistryMirrorPreKubeadmCommands()...) }), MachineDeployment: machineDeployment(), ProviderMachineTemplate: dockerMachineTemplate("test-md-0-1"), }, clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]{ KubeadmConfigTemplate: kubeadmConfigTemplate( func(kct *bootstrapv1.KubeadmConfigTemplate) { kct.Name = "test-md-1-1" kct.Spec.Template.Spec.Files = append(kct.Spec.Template.Spec.Files, tt.files...) kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, test.RegistryMirrorPreKubeadmCommands()...) 
}, ), MachineDeployment: machineDeployment( func(md *clusterv1.MachineDeployment) { md.Name = "test-md-1" md.Spec.Template.Spec.InfrastructureRef.Name = "test-md-1-1" md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "test-md-1-1" }, ), ProviderMachineTemplate: dockerMachineTemplate("test-md-1-1"), }, )) }) } } func kubeadmConfigTemplate(opts ...func(*bootstrapv1.KubeadmConfigTemplate)) *bootstrapv1.KubeadmConfigTemplate { o := &bootstrapv1.KubeadmConfigTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "KubeadmConfigTemplate", APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-md-0-1", Namespace: "eksa-system", }, Spec: bootstrapv1.KubeadmConfigTemplateSpec{ Template: bootstrapv1.KubeadmConfigTemplateResource{ Spec: bootstrapv1.KubeadmConfigSpec{ JoinConfiguration: &bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ CRISocket: "/var/run/containerd/containerd.sock", KubeletExtraArgs: map[string]string{ "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "cgroup-driver": "cgroupfs", "eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%", }, Taints: []corev1.Taint{}, }, }, }, }, }, } for _, opt := range opts { opt(o) } return o } func machineDeployment(opts ...func(*clusterv1.MachineDeployment)) *clusterv1.MachineDeployment { o := &clusterv1.MachineDeployment{ TypeMeta: metav1.TypeMeta{ Kind: "MachineDeployment", APIVersion: "cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-md-0", Namespace: "eksa-system", }, Spec: clusterv1.MachineDeploymentSpec{ ClusterName: "test", Replicas: ptr.Int32(3), Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{}, Spec: clusterv1.MachineSpec{ ClusterName: "test", Bootstrap: clusterv1.Bootstrap{ ConfigRef: &corev1.ObjectReference{ Kind: "KubeadmConfigTemplate", Name: "test-md-0-1", APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", Namespace: "eksa-system", }, }, InfrastructureRef: corev1.ObjectReference{ Kind: "DockerMachineTemplate", Name: "test-md-0-1", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Namespace: "eksa-system", }, Version: ptr.String("v1.23.12-eks-1-23-6"), }, }, }, } for _, opt := range opts { opt(o) } return o }
416
eks-anywhere
aws
Go
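The tests above repeatedly copy objects before seeding the fake client, and that discipline deserves a concrete shape: the client mutates what it is handed (the ResourceVersion, for instance), so assertions must run against a DeepCopy taken beforehand. A hypothetical test sketching the pattern, reusing the machineDeployment helper defined in the file above:

package docker_test

import (
	"testing"

	"github.com/aws/eks-anywhere/internal/test"
)

// TestSketchCopyBeforeSeeding illustrates snapshot-before-seed: take the
// DeepCopy before the fake client can touch the object, then assert against
// the snapshot, never the seeded original. Hypothetical test, not part of
// the suite above.
func TestSketchCopyBeforeSeeding(t *testing.T) {
	current := machineDeployment() // helper defined earlier in this file
	expected := current.DeepCopy() // snapshot taken before seeding

	client := test.NewFakeKubeClient(current)
	_ = client // exercise the code under test with this client...

	// ...then compare its output against expected, not current.
	if expected.Name != "test-md-0" {
		t.Fatalf("unexpected snapshot name %q", expected.Name)
	}
}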
// Code generated by MockGen. DO NOT EDIT. // Source: github.com/aws/eks-anywhere/pkg/providers/docker (interfaces: ProviderClient,ProviderKubectlClient) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" executables "github.com/aws/eks-anywhere/pkg/executables" types "github.com/aws/eks-anywhere/pkg/types" v1beta1 "github.com/aws/etcdadm-controller/api/v1beta1" gomock "github.com/golang/mock/gomock" v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" v1beta11 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ) // MockProviderClient is a mock of ProviderClient interface. type MockProviderClient struct { ctrl *gomock.Controller recorder *MockProviderClientMockRecorder } // MockProviderClientMockRecorder is the mock recorder for MockProviderClient. type MockProviderClientMockRecorder struct { mock *MockProviderClient } // NewMockProviderClient creates a new mock instance. func NewMockProviderClient(ctrl *gomock.Controller) *MockProviderClient { mock := &MockProviderClient{ctrl: ctrl} mock.recorder = &MockProviderClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { return m.recorder } // GetDockerLBPort mocks base method. func (m *MockProviderClient) GetDockerLBPort(arg0 context.Context, arg1 string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDockerLBPort", arg0, arg1) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // GetDockerLBPort indicates an expected call of GetDockerLBPort. func (mr *MockProviderClientMockRecorder) GetDockerLBPort(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDockerLBPort", reflect.TypeOf((*MockProviderClient)(nil).GetDockerLBPort), arg0, arg1) } // MockProviderKubectlClient is a mock of ProviderKubectlClient interface. type MockProviderKubectlClient struct { ctrl *gomock.Controller recorder *MockProviderKubectlClientMockRecorder } // MockProviderKubectlClientMockRecorder is the mock recorder for MockProviderKubectlClient. type MockProviderKubectlClientMockRecorder struct { mock *MockProviderKubectlClient } // NewMockProviderKubectlClient creates a new mock instance. func NewMockProviderKubectlClient(ctrl *gomock.Controller) *MockProviderKubectlClient { mock := &MockProviderKubectlClient{ctrl: ctrl} mock.recorder = &MockProviderKubectlClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockProviderKubectlClient) EXPECT() *MockProviderKubectlClientMockRecorder { return m.recorder } // GetEksaCluster mocks base method. func (m *MockProviderKubectlClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2) ret0, _ := ret[0].(*v1alpha1.Cluster) ret1, _ := ret[1].(error) return ret0, ret1 } // GetEksaCluster indicates an expected call of GetEksaCluster. func (mr *MockProviderKubectlClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEksaCluster), arg0, arg1, arg2) } // GetEtcdadmCluster mocks base method. 
func (m *MockProviderKubectlClient) GetEtcdadmCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta1.EtcdadmCluster, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1, arg2} for _, a := range arg3 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetEtcdadmCluster", varargs...) ret0, _ := ret[0].(*v1beta1.EtcdadmCluster) ret1, _ := ret[1].(error) return ret0, ret1 } // GetEtcdadmCluster indicates an expected call of GetEtcdadmCluster. func (mr *MockProviderKubectlClientMockRecorder) GetEtcdadmCluster(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEtcdadmCluster", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetEtcdadmCluster), varargs...) } // GetKubeadmControlPlane mocks base method. func (m *MockProviderKubectlClient) GetKubeadmControlPlane(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta11.KubeadmControlPlane, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1, arg2} for _, a := range arg3 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetKubeadmControlPlane", varargs...) ret0, _ := ret[0].(*v1beta11.KubeadmControlPlane) ret1, _ := ret[1].(error) return ret0, ret1 } // GetKubeadmControlPlane indicates an expected call of GetKubeadmControlPlane. func (mr *MockProviderKubectlClientMockRecorder) GetKubeadmControlPlane(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeadmControlPlane", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetKubeadmControlPlane), varargs...) } // GetMachineDeployment mocks base method. func (m *MockProviderKubectlClient) GetMachineDeployment(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1beta10.MachineDeployment, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetMachineDeployment", varargs...) ret0, _ := ret[0].(*v1beta10.MachineDeployment) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMachineDeployment indicates an expected call of GetMachineDeployment. func (mr *MockProviderKubectlClientMockRecorder) GetMachineDeployment(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1}, arg2...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeployment", reflect.TypeOf((*MockProviderKubectlClient)(nil).GetMachineDeployment), varargs...) } // UpdateAnnotation mocks base method. func (m *MockProviderKubectlClient) UpdateAnnotation(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 ...executables.KubectlOpt) error { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1, arg2, arg3} for _, a := range arg4 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "UpdateAnnotation", varargs...) ret0, _ := ret[0].(error) return ret0 } // UpdateAnnotation indicates an expected call of UpdateAnnotation. func (mr *MockProviderKubectlClientMockRecorder) UpdateAnnotation(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotation", reflect.TypeOf((*MockProviderKubectlClient)(nil).UpdateAnnotation), varargs...) }
174
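For orientation, the generated mocks above are consumed through gomock controllers. A minimal sketch of stubbing GetDockerLBPort in a test follows; the mocks import path and the cluster name are assumptions for illustration:

package docker_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/providers/docker/mocks"
)

func TestGetDockerLBPortStub(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	providerClient := mocks.NewMockProviderClient(ctrl)

	// Expect exactly one call with any context and this cluster name,
	// and stub the load balancer port it should report.
	providerClient.EXPECT().
		GetDockerLBPort(gomock.Any(), "workload-0").
		Return("6443", nil)

	port, err := providerClient.GetDockerLBPort(context.Background(), "workload-0")
	if err != nil || port != "6443" {
		t.Fatalf("unexpected result: %q, %v", port, err)
	}
}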
eks-anywhere
aws
Go
package reconciler_test

import (
	"os"
	"testing"

	"github.com/aws/eks-anywhere/internal/test/envtest"
)

var env *envtest.Environment

func TestMain(m *testing.M) {
	os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
15
eks-anywhere
aws
Go
package reconciler

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/controller"
	"github.com/aws/eks-anywhere/pkg/controller/clientutil"
	"github.com/aws/eks-anywhere/pkg/controller/clusters"
	"github.com/aws/eks-anywhere/pkg/controller/serverside"
	"github.com/aws/eks-anywhere/pkg/providers/docker"
)

// Reconciler contains dependencies for a docker reconciler.
type Reconciler struct {
	client               client.Client
	cniReconciler        CNIReconciler
	remoteClientRegistry RemoteClientRegistry
	*serverside.ObjectApplier
}

// CNIReconciler is an interface for reconciling the CNI in the Docker cluster reconciler.
type CNIReconciler interface {
	Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error)
}

// RemoteClientRegistry is an interface for building Kubernetes clients connected to remote (workload) clusters.
type RemoteClientRegistry interface {
	GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error)
}

// New creates a new Docker provider reconciler.
func New(client client.Client, cniReconciler CNIReconciler, remoteClientRegistry RemoteClientRegistry) *Reconciler {
	return &Reconciler{
		client:               client,
		cniReconciler:        cniReconciler,
		remoteClientRegistry: remoteClientRegistry,
		ObjectApplier:        serverside.NewObjectApplier(client),
	}
}

// Reconcile brings the cluster to the desired state for the docker provider.
func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "docker")

	clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c)
	if err != nil {
		return controller.Result{}, err
	}

	return controller.NewPhaseRunner[*cluster.Spec]().Register(
		r.ReconcileControlPlane,
		r.CheckControlPlaneReady,
		r.ReconcileCNI,
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// CheckControlPlaneReady checks whether the control plane for an eks-a cluster is ready or not.
// Requeues with the appropriate wait times whenever the cluster is not ready yet.
func (r *Reconciler) CheckControlPlaneReady(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "checkControlPlaneReady")
	return clusters.CheckControlPlaneReady(ctx, r.client, log, spec.Cluster)
}

// ReconcileCNI takes the Cilium CNI in a cluster to the desired state defined in a cluster spec.
func (r *Reconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileCNI")

	client, err := r.remoteClientRegistry.GetClient(ctx, controller.CapiClusterObjectKey(clusterSpec.Cluster))
	if err != nil {
		return controller.Result{}, err
	}

	return r.cniReconciler.Reconcile(ctx, log, client, clusterSpec)
}

// ReconcileWorkerNodes validates the cluster definition and reconciles the worker nodes
// to the desired state.
func (r *Reconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "docker", "reconcile type", "workers")

	clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c)
	if err != nil {
		return controller.Result{}, errors.Wrap(err, "building cluster Spec for worker node reconcile")
	}

	return controller.NewPhaseRunner[*cluster.Spec]().Register(
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// ReconcileWorkers applies the worker CAPI objects to the cluster.
func (r *Reconciler) ReconcileWorkers(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileWorkers")
	log.Info("Applying worker CAPI objects")

	w, err := docker.WorkersSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
	if err != nil {
		return controller.Result{}, errors.Wrap(err, "generating workers spec")
	}

	return clusters.ReconcileWorkersForEKSA(ctx, log, r.client, spec.Cluster, clusters.ToWorkers(w))
}

// ReconcileControlPlane applies the control plane CAPI objects to the cluster.
func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileControlPlane")
	log.Info("Applying control plane CAPI objects")

	cp, err := docker.ControlPlaneSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
	if err != nil {
		return controller.Result{}, err
	}

	return clusters.ReconcileControlPlane(ctx, r.client, &clusters.ControlPlane{
		Cluster:                     cp.Cluster,
		ProviderCluster:             cp.ProviderCluster,
		KubeadmControlPlane:         cp.KubeadmControlPlane,
		ControlPlaneMachineTemplate: cp.ControlPlaneMachineTemplate,
		EtcdCluster:                 cp.EtcdCluster,
		EtcdMachineTemplate:         cp.EtcdMachineTemplate,
	})
}
124
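Here is a compilable sketch of wiring the reconciler above. The noopCNI and singleClientRegistry stubs are hypothetical stand-ins for the real Cilium reconciler and remote client registry; a real controller manager would pass its own implementations:

package wiring

import (
	"context"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/controller"
	"github.com/aws/eks-anywhere/pkg/providers/docker/reconciler"
)

// noopCNI satisfies reconciler.CNIReconciler but reconciles nothing.
type noopCNI struct{}

func (noopCNI) Reconcile(ctx context.Context, logger logr.Logger, c client.Client, spec *cluster.Spec) (controller.Result, error) {
	return controller.Result{}, nil
}

// singleClientRegistry hands back the same client for every workload cluster.
type singleClientRegistry struct{ c client.Client }

func (r singleClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) {
	return r.c, nil
}

// newDockerReconciler shows the constructor wiring only.
func newDockerReconciler(c client.Client) *reconciler.Reconciler {
	return reconciler.New(c, noopCNI{}, singleClientRegistry{c: c})
}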
eks-anywhere
aws
Go
package reconciler_test import ( "context" "errors" "strings" "testing" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" clusterspec "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller" "github.com/aws/eks-anywhere/pkg/controller/clientutil" "github.com/aws/eks-anywhere/pkg/providers/docker/reconciler" dockereconcilermocks "github.com/aws/eks-anywhere/pkg/providers/docker/reconciler/mocks" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) const ( clusterNamespace = "test-namespace" ) func TestReconcilerReconcileSuccess(t *testing.T) { tt := newReconcilerTest(t) logger := test.NewNullLogger() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() remoteClient := fake.NewClientBuilder().Build() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace}, ).Return(remoteClient, nil) tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec()) tt.Expect(tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster)).To(Equal(controller.Result{})) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.ShouldEventuallyExist(tt.ctx, capiCluster) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-control-plane-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyNotExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyNotExist(tt.ctx, &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcilerReconcileWorkerNodesSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "my-management-cluster" 
tt.cluster.SetSelfManaged() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcileCNISuccess(t *testing.T) { tt := newReconcilerTest(t) tt.withFakeClient() logger := test.NewNullLogger() remoteClient := fake.NewClientBuilder().Build() spec := tt.buildSpec() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"}, ).Return(remoteClient, nil) tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec) result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcileCNIErrorClientRegistry(t *testing.T) { tt := newReconcilerTest(t) tt.withFakeClient() logger := test.NewNullLogger() spec := tt.buildSpec() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"}, ).Return(nil, errors.New("building client")) result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec) tt.Expect(err).To(MatchError(ContainSubstring("building client"))) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcilerReconcileWorkersSuccess(t *testing.T) { tt := newReconcilerTest(t) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() result, err := tt.reconciler().ReconcileWorkers(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcilerReconcileWorkersErrorGeneratingSpec(t *testing.T) { tt := newReconcilerTest(t) tt.createAllObjs() spec := tt.buildSpec() // this will always return an error since objects are not registered in the scheme tt.client = fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build() tt.Expect( 
tt.reconciler().ReconcileWorkers(tt.ctx, test.NewNullLogger(), spec), ).Error().To(MatchError(ContainSubstring("generating workers spec"))) } func TestReconcilerReconcileWorkerNodesFail(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "my-management-cluster" tt.cluster.SetSelfManaged() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.cluster.Spec.KubernetesVersion = "" tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() logger := test.NewNullLogger() _, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster) tt.Expect(err).To(MatchError(ContainSubstring("building cluster Spec for worker node reconcile"))) } func TestReconcileControlPlaneStackedEtcdSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.ShouldEventuallyExist(tt.ctx, capiCluster) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-control-plane-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyNotExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyNotExist(tt.ctx, &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd", Namespace: constants.EksaSystemNamespace, }, }, ) } func TestReconcileControlPlaneUnstackedEtcdSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Spec.ExternalEtcdConfiguration = &anywherev1.ExternalEtcdConfiguration{ Count: 1, } tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, logger, tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.ShouldEventuallyExist(tt.ctx, capiCluster) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-control-plane-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &dockerv1.DockerMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &etcdv1.EtcdadmCluster{ ObjectMeta: metav1.ObjectMeta{ Name: tt.cluster.Name + "-etcd", Namespace: 
constants.EksaSystemNamespace, }, }, ) } func TestReconcilerReconcileControlPlaneFailure(t *testing.T) { tt := newReconcilerTest(t) tt.createAllObjs() spec := tt.buildSpec() spec.Cluster.Spec.KubernetesVersion = "" _, err := tt.reconciler().ReconcileControlPlane(tt.ctx, test.NewNullLogger(), spec) tt.Expect(err).To(MatchError(ContainSubstring("generating docker control plane yaml spec"))) } type reconcilerTest struct { t testing.TB *WithT *envtest.APIExpecter ctx context.Context cniReconciler *dockereconcilermocks.MockCNIReconciler remoteClientRegistry *dockereconcilermocks.MockRemoteClientRegistry cluster *anywherev1.Cluster client client.Client env *envtest.Environment eksaSupportObjs []client.Object datacenterConfig *anywherev1.DockerDatacenterConfig } func newReconcilerTest(t testing.TB) *reconcilerTest { ctrl := gomock.NewController(t) cniReconciler := dockereconcilermocks.NewMockCNIReconciler(ctrl) remoteClientRegistry := dockereconcilermocks.NewMockRemoteClientRegistry(ctrl) c := env.Client() bundle := test.Bundle() managementCluster := dockerCluster(func(c *anywherev1.Cluster) { c.Name = "management-cluster" c.Spec.ManagementCluster = anywherev1.ManagementCluster{ Name: c.Name, } c.Spec.BundlesRef = &anywherev1.BundlesRef{ Name: bundle.Name, Namespace: bundle.Namespace, APIVersion: bundle.APIVersion, } }) workloadClusterDatacenter := dataCenter() cluster := dockerCluster(func(c *anywherev1.Cluster) { c.Name = strings.ToLower(t.Name()) c.Spec.ManagementCluster = anywherev1.ManagementCluster{ Name: managementCluster.Name, } c.Spec.BundlesRef = &anywherev1.BundlesRef{ Name: bundle.Name, Namespace: bundle.Namespace, APIVersion: bundle.APIVersion, } c.Spec.ControlPlaneConfiguration = anywherev1.ControlPlaneConfiguration{ Count: 1, } c.Spec.DatacenterRef = anywherev1.Ref{ Kind: anywherev1.DockerDatacenterKind, Name: workloadClusterDatacenter.Name, } c.Spec.WorkerNodeGroupConfigurations = append(c.Spec.WorkerNodeGroupConfigurations, anywherev1.WorkerNodeGroupConfiguration{ Count: ptr.Int(1), Name: "md-0", }, ) }) tt := &reconcilerTest{ t: t, WithT: NewWithT(t), APIExpecter: envtest.NewAPIExpecter(t, c), ctx: context.Background(), cniReconciler: cniReconciler, remoteClientRegistry: remoteClientRegistry, cluster: cluster, client: c, env: env, eksaSupportObjs: []client.Object{ test.Namespace(clusterNamespace), test.Namespace(constants.EksaSystemNamespace), managementCluster, workloadClusterDatacenter, bundle, test.EksdRelease(), }, datacenterConfig: workloadClusterDatacenter, } t.Cleanup(tt.cleanup) return tt } func (tt *reconcilerTest) cleanup() { tt.DeleteAndWait(tt.ctx, tt.allObjs()...) tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.Cluster{}) tt.DeleteAllOfAndWait(tt.ctx, &dockerv1.DockerMachineTemplate{}) tt.DeleteAllOfAndWait(tt.ctx, &etcdv1.EtcdadmCluster{}) } func (tt *reconcilerTest) reconciler() *reconciler.Reconciler { return reconciler.New(tt.client, tt.cniReconciler, tt.remoteClientRegistry) } func (tt *reconcilerTest) buildSpec() *clusterspec.Spec { tt.t.Helper() spec, err := clusterspec.BuildSpec(tt.ctx, clientutil.NewKubeClient(tt.client), tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) return spec } func (tt *reconcilerTest) withFakeClient() { tt.client = fake.NewClientBuilder().WithObjects(clientutil.ObjectsToClientObjects(tt.allObjs())...).Build() } func (tt *reconcilerTest) createAllObjs() { tt.t.Helper() envtest.CreateObjs(tt.ctx, tt.t, tt.client, tt.allObjs()...) 
} func (tt *reconcilerTest) allObjs() []client.Object { objs := make([]client.Object, 0, len(tt.eksaSupportObjs)+1) objs = append(objs, tt.eksaSupportObjs...) objs = append(objs, tt.cluster) return objs } type clusterOpt func(*anywherev1.Cluster) func dockerCluster(opts ...clusterOpt) *anywherev1.Cluster { c := &anywherev1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.ClusterKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, }, Spec: anywherev1.ClusterSpec{ KubernetesVersion: "1.22", ClusterNetwork: anywherev1.ClusterNetwork{ Pods: anywherev1.Pods{ CidrBlocks: []string{"0.0.0.0"}, }, Services: anywherev1.Services{ CidrBlocks: []string{"0.0.0.0"}, }, }, }, } for _, opt := range opts { opt(c) } return c } func dataCenter() *anywherev1.DockerDatacenterConfig { return &anywherev1.DockerDatacenterConfig{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.DockerDatacenterKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "datacenter", Namespace: clusterNamespace, }, } }
574
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/docker/reconciler/reconciler.go // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" cluster "github.com/aws/eks-anywhere/pkg/cluster" controller "github.com/aws/eks-anywhere/pkg/controller" logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" client "sigs.k8s.io/controller-runtime/pkg/client" ) // MockCNIReconciler is a mock of CNIReconciler interface. type MockCNIReconciler struct { ctrl *gomock.Controller recorder *MockCNIReconcilerMockRecorder } // MockCNIReconcilerMockRecorder is the mock recorder for MockCNIReconciler. type MockCNIReconcilerMockRecorder struct { mock *MockCNIReconciler } // NewMockCNIReconciler creates a new mock instance. func NewMockCNIReconciler(ctrl *gomock.Controller) *MockCNIReconciler { mock := &MockCNIReconciler{ctrl: ctrl} mock.recorder = &MockCNIReconcilerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockCNIReconciler) EXPECT() *MockCNIReconcilerMockRecorder { return m.recorder } // Reconcile mocks base method. func (m *MockCNIReconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Reconcile", ctx, logger, client, spec) ret0, _ := ret[0].(controller.Result) ret1, _ := ret[1].(error) return ret0, ret1 } // Reconcile indicates an expected call of Reconcile. func (mr *MockCNIReconcilerMockRecorder) Reconcile(ctx, logger, client, spec interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCNIReconciler)(nil).Reconcile), ctx, logger, client, spec) } // MockRemoteClientRegistry is a mock of RemoteClientRegistry interface. type MockRemoteClientRegistry struct { ctrl *gomock.Controller recorder *MockRemoteClientRegistryMockRecorder } // MockRemoteClientRegistryMockRecorder is the mock recorder for MockRemoteClientRegistry. type MockRemoteClientRegistryMockRecorder struct { mock *MockRemoteClientRegistry } // NewMockRemoteClientRegistry creates a new mock instance. func NewMockRemoteClientRegistry(ctrl *gomock.Controller) *MockRemoteClientRegistry { mock := &MockRemoteClientRegistry{ctrl: ctrl} mock.recorder = &MockRemoteClientRegistryMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRemoteClientRegistry) EXPECT() *MockRemoteClientRegistryMockRecorder { return m.recorder } // GetClient mocks base method. func (m *MockRemoteClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetClient", ctx, cluster) ret0, _ := ret[0].(client.Client) ret1, _ := ret[1].(error) return ret0, ret1 } // GetClient indicates an expected call of GetClient. func (mr *MockRemoteClientRegistryMockRecorder) GetClient(ctx, cluster interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockRemoteClientRegistry)(nil).GetClient), ctx, cluster) }
93
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: github.com/aws/eks-anywhere/pkg/providers (interfaces: Provider,DatacenterConfig,MachineConfig) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" bootstrapper "github.com/aws/eks-anywhere/pkg/bootstrapper" cluster "github.com/aws/eks-anywhere/pkg/cluster" providers "github.com/aws/eks-anywhere/pkg/providers" types "github.com/aws/eks-anywhere/pkg/types" gomock "github.com/golang/mock/gomock" ) // MockProvider is a mock of Provider interface. type MockProvider struct { ctrl *gomock.Controller recorder *MockProviderMockRecorder } // MockProviderMockRecorder is the mock recorder for MockProvider. type MockProviderMockRecorder struct { mock *MockProvider } // NewMockProvider creates a new mock instance. func NewMockProvider(ctrl *gomock.Controller) *MockProvider { mock := &MockProvider{ctrl: ctrl} mock.recorder = &MockProviderMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockProvider) EXPECT() *MockProviderMockRecorder { return m.recorder } // BootstrapClusterOpts mocks base method. func (m *MockProvider) BootstrapClusterOpts(arg0 *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BootstrapClusterOpts", arg0) ret0, _ := ret[0].([]bootstrapper.BootstrapClusterOption) ret1, _ := ret[1].(error) return ret0, ret1 } // BootstrapClusterOpts indicates an expected call of BootstrapClusterOpts. func (mr *MockProviderMockRecorder) BootstrapClusterOpts(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapClusterOpts", reflect.TypeOf((*MockProvider)(nil).BootstrapClusterOpts), arg0) } // ChangeDiff mocks base method. func (m *MockProvider) ChangeDiff(arg0, arg1 *cluster.Spec) *types.ComponentChangeDiff { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChangeDiff", arg0, arg1) ret0, _ := ret[0].(*types.ComponentChangeDiff) return ret0 } // ChangeDiff indicates an expected call of ChangeDiff. func (mr *MockProviderMockRecorder) ChangeDiff(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeDiff", reflect.TypeOf((*MockProvider)(nil).ChangeDiff), arg0, arg1) } // DatacenterConfig mocks base method. func (m *MockProvider) DatacenterConfig(arg0 *cluster.Spec) providers.DatacenterConfig { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DatacenterConfig", arg0) ret0, _ := ret[0].(providers.DatacenterConfig) return ret0 } // DatacenterConfig indicates an expected call of DatacenterConfig. func (mr *MockProviderMockRecorder) DatacenterConfig(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DatacenterConfig", reflect.TypeOf((*MockProvider)(nil).DatacenterConfig), arg0) } // DatacenterResourceType mocks base method. func (m *MockProvider) DatacenterResourceType() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DatacenterResourceType") ret0, _ := ret[0].(string) return ret0 } // DatacenterResourceType indicates an expected call of DatacenterResourceType. func (mr *MockProviderMockRecorder) DatacenterResourceType() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DatacenterResourceType", reflect.TypeOf((*MockProvider)(nil).DatacenterResourceType)) } // DeleteResources mocks base method. 
func (m *MockProvider) DeleteResources(arg0 context.Context, arg1 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteResources", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // DeleteResources indicates an expected call of DeleteResources. func (mr *MockProviderMockRecorder) DeleteResources(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResources", reflect.TypeOf((*MockProvider)(nil).DeleteResources), arg0, arg1) } // EnvMap mocks base method. func (m *MockProvider) EnvMap(arg0 *cluster.Spec) (map[string]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EnvMap", arg0) ret0, _ := ret[0].(map[string]string) ret1, _ := ret[1].(error) return ret0, ret1 } // EnvMap indicates an expected call of EnvMap. func (mr *MockProviderMockRecorder) EnvMap(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnvMap", reflect.TypeOf((*MockProvider)(nil).EnvMap), arg0) } // GenerateCAPISpecForCreate mocks base method. func (m *MockProvider) GenerateCAPISpecForCreate(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) ([]byte, []byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateCAPISpecForCreate", arg0, arg1, arg2) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].([]byte) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // GenerateCAPISpecForCreate indicates an expected call of GenerateCAPISpecForCreate. func (mr *MockProviderMockRecorder) GenerateCAPISpecForCreate(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCAPISpecForCreate", reflect.TypeOf((*MockProvider)(nil).GenerateCAPISpecForCreate), arg0, arg1, arg2) } // GenerateCAPISpecForUpgrade mocks base method. func (m *MockProvider) GenerateCAPISpecForUpgrade(arg0 context.Context, arg1, arg2 *types.Cluster, arg3, arg4 *cluster.Spec) ([]byte, []byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateCAPISpecForUpgrade", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].([]byte) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // GenerateCAPISpecForUpgrade indicates an expected call of GenerateCAPISpecForUpgrade. func (mr *MockProviderMockRecorder) GenerateCAPISpecForUpgrade(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCAPISpecForUpgrade", reflect.TypeOf((*MockProvider)(nil).GenerateCAPISpecForUpgrade), arg0, arg1, arg2, arg3, arg4) } // GetDeployments mocks base method. func (m *MockProvider) GetDeployments() map[string][]string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDeployments") ret0, _ := ret[0].(map[string][]string) return ret0 } // GetDeployments indicates an expected call of GetDeployments. func (mr *MockProviderMockRecorder) GetDeployments() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeployments", reflect.TypeOf((*MockProvider)(nil).GetDeployments)) } // GetInfrastructureBundle mocks base method. func (m *MockProvider) GetInfrastructureBundle(arg0 *cluster.Spec) *types.InfrastructureBundle { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInfrastructureBundle", arg0) ret0, _ := ret[0].(*types.InfrastructureBundle) return ret0 } // GetInfrastructureBundle indicates an expected call of GetInfrastructureBundle. 
func (mr *MockProviderMockRecorder) GetInfrastructureBundle(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInfrastructureBundle", reflect.TypeOf((*MockProvider)(nil).GetInfrastructureBundle), arg0) } // InstallCustomProviderComponents mocks base method. func (m *MockProvider) InstallCustomProviderComponents(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InstallCustomProviderComponents", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // InstallCustomProviderComponents indicates an expected call of InstallCustomProviderComponents. func (mr *MockProviderMockRecorder) InstallCustomProviderComponents(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCustomProviderComponents", reflect.TypeOf((*MockProvider)(nil).InstallCustomProviderComponents), arg0, arg1) } // MachineConfigs mocks base method. func (m *MockProvider) MachineConfigs(arg0 *cluster.Spec) []providers.MachineConfig { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MachineConfigs", arg0) ret0, _ := ret[0].([]providers.MachineConfig) return ret0 } // MachineConfigs indicates an expected call of MachineConfigs. func (mr *MockProviderMockRecorder) MachineConfigs(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MachineConfigs", reflect.TypeOf((*MockProvider)(nil).MachineConfigs), arg0) } // MachineResourceType mocks base method. func (m *MockProvider) MachineResourceType() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MachineResourceType") ret0, _ := ret[0].(string) return ret0 } // MachineResourceType indicates an expected call of MachineResourceType. func (mr *MockProviderMockRecorder) MachineResourceType() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MachineResourceType", reflect.TypeOf((*MockProvider)(nil).MachineResourceType)) } // Name mocks base method. func (m *MockProvider) Name() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Name") ret0, _ := ret[0].(string) return ret0 } // Name indicates an expected call of Name. func (mr *MockProviderMockRecorder) Name() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockProvider)(nil).Name)) } // PostBootstrapDeleteForUpgrade mocks base method. func (m *MockProvider) PostBootstrapDeleteForUpgrade(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PostBootstrapDeleteForUpgrade", arg0) ret0, _ := ret[0].(error) return ret0 } // PostBootstrapDeleteForUpgrade indicates an expected call of PostBootstrapDeleteForUpgrade. func (mr *MockProviderMockRecorder) PostBootstrapDeleteForUpgrade(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostBootstrapDeleteForUpgrade", reflect.TypeOf((*MockProvider)(nil).PostBootstrapDeleteForUpgrade), arg0) } // PostBootstrapSetup mocks base method. func (m *MockProvider) PostBootstrapSetup(arg0 context.Context, arg1 *v1alpha1.Cluster, arg2 *types.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PostBootstrapSetup", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PostBootstrapSetup indicates an expected call of PostBootstrapSetup. 
func (mr *MockProviderMockRecorder) PostBootstrapSetup(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostBootstrapSetup", reflect.TypeOf((*MockProvider)(nil).PostBootstrapSetup), arg0, arg1, arg2) } // PostBootstrapSetupUpgrade mocks base method. func (m *MockProvider) PostBootstrapSetupUpgrade(arg0 context.Context, arg1 *v1alpha1.Cluster, arg2 *types.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PostBootstrapSetupUpgrade", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PostBootstrapSetupUpgrade indicates an expected call of PostBootstrapSetupUpgrade. func (mr *MockProviderMockRecorder) PostBootstrapSetupUpgrade(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostBootstrapSetupUpgrade", reflect.TypeOf((*MockProvider)(nil).PostBootstrapSetupUpgrade), arg0, arg1, arg2) } // PostClusterDeleteValidate mocks base method. func (m *MockProvider) PostClusterDeleteValidate(arg0 context.Context, arg1 *types.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PostClusterDeleteValidate", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // PostClusterDeleteValidate indicates an expected call of PostClusterDeleteValidate. func (mr *MockProviderMockRecorder) PostClusterDeleteValidate(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostClusterDeleteValidate", reflect.TypeOf((*MockProvider)(nil).PostClusterDeleteValidate), arg0, arg1) } // PostMoveManagementToBootstrap mocks base method. func (m *MockProvider) PostMoveManagementToBootstrap(arg0 context.Context, arg1 *types.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PostMoveManagementToBootstrap", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // PostMoveManagementToBootstrap indicates an expected call of PostMoveManagementToBootstrap. func (mr *MockProviderMockRecorder) PostMoveManagementToBootstrap(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostMoveManagementToBootstrap", reflect.TypeOf((*MockProvider)(nil).PostMoveManagementToBootstrap), arg0, arg1) } // PostWorkloadInit mocks base method. func (m *MockProvider) PostWorkloadInit(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PostWorkloadInit", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PostWorkloadInit indicates an expected call of PostWorkloadInit. func (mr *MockProviderMockRecorder) PostWorkloadInit(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostWorkloadInit", reflect.TypeOf((*MockProvider)(nil).PostWorkloadInit), arg0, arg1, arg2) } // PreCAPIInstallOnBootstrap mocks base method. func (m *MockProvider) PreCAPIInstallOnBootstrap(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PreCAPIInstallOnBootstrap", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PreCAPIInstallOnBootstrap indicates an expected call of PreCAPIInstallOnBootstrap. 
func (mr *MockProviderMockRecorder) PreCAPIInstallOnBootstrap(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreCAPIInstallOnBootstrap", reflect.TypeOf((*MockProvider)(nil).PreCAPIInstallOnBootstrap), arg0, arg1, arg2) } // PreCoreComponentsUpgrade mocks base method. func (m *MockProvider) PreCoreComponentsUpgrade(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PreCoreComponentsUpgrade", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PreCoreComponentsUpgrade indicates an expected call of PreCoreComponentsUpgrade. func (mr *MockProviderMockRecorder) PreCoreComponentsUpgrade(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreCoreComponentsUpgrade", reflect.TypeOf((*MockProvider)(nil).PreCoreComponentsUpgrade), arg0, arg1, arg2) } // RunPostControlPlaneUpgrade mocks base method. func (m *MockProvider) RunPostControlPlaneUpgrade(arg0 context.Context, arg1, arg2 *cluster.Spec, arg3, arg4 *types.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RunPostControlPlaneUpgrade", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // RunPostControlPlaneUpgrade indicates an expected call of RunPostControlPlaneUpgrade. func (mr *MockProviderMockRecorder) RunPostControlPlaneUpgrade(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPostControlPlaneUpgrade", reflect.TypeOf((*MockProvider)(nil).RunPostControlPlaneUpgrade), arg0, arg1, arg2, arg3, arg4) } // SetupAndValidateCreateCluster mocks base method. func (m *MockProvider) SetupAndValidateCreateCluster(arg0 context.Context, arg1 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetupAndValidateCreateCluster", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // SetupAndValidateCreateCluster indicates an expected call of SetupAndValidateCreateCluster. func (mr *MockProviderMockRecorder) SetupAndValidateCreateCluster(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupAndValidateCreateCluster", reflect.TypeOf((*MockProvider)(nil).SetupAndValidateCreateCluster), arg0, arg1) } // SetupAndValidateDeleteCluster mocks base method. func (m *MockProvider) SetupAndValidateDeleteCluster(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetupAndValidateDeleteCluster", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // SetupAndValidateDeleteCluster indicates an expected call of SetupAndValidateDeleteCluster. func (mr *MockProviderMockRecorder) SetupAndValidateDeleteCluster(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupAndValidateDeleteCluster", reflect.TypeOf((*MockProvider)(nil).SetupAndValidateDeleteCluster), arg0, arg1, arg2) } // SetupAndValidateUpgradeCluster mocks base method. func (m *MockProvider) SetupAndValidateUpgradeCluster(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetupAndValidateUpgradeCluster", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // SetupAndValidateUpgradeCluster indicates an expected call of SetupAndValidateUpgradeCluster. 
func (mr *MockProviderMockRecorder) SetupAndValidateUpgradeCluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupAndValidateUpgradeCluster", reflect.TypeOf((*MockProvider)(nil).SetupAndValidateUpgradeCluster), arg0, arg1, arg2, arg3) } // UpdateKubeConfig mocks base method. func (m *MockProvider) UpdateKubeConfig(arg0 *[]byte, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateKubeConfig", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // UpdateKubeConfig indicates an expected call of UpdateKubeConfig. func (mr *MockProviderMockRecorder) UpdateKubeConfig(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateKubeConfig", reflect.TypeOf((*MockProvider)(nil).UpdateKubeConfig), arg0, arg1) } // UpdateSecrets mocks base method. func (m *MockProvider) UpdateSecrets(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateSecrets", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // UpdateSecrets indicates an expected call of UpdateSecrets. func (mr *MockProviderMockRecorder) UpdateSecrets(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecrets", reflect.TypeOf((*MockProvider)(nil).UpdateSecrets), arg0, arg1, arg2) } // UpgradeNeeded mocks base method. func (m *MockProvider) UpgradeNeeded(arg0 context.Context, arg1, arg2 *cluster.Spec, arg3 *types.Cluster) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpgradeNeeded", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // UpgradeNeeded indicates an expected call of UpgradeNeeded. func (mr *MockProviderMockRecorder) UpgradeNeeded(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeNeeded", reflect.TypeOf((*MockProvider)(nil).UpgradeNeeded), arg0, arg1, arg2, arg3) } // ValidateNewSpec mocks base method. func (m *MockProvider) ValidateNewSpec(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateNewSpec", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // ValidateNewSpec indicates an expected call of ValidateNewSpec. func (mr *MockProviderMockRecorder) ValidateNewSpec(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNewSpec", reflect.TypeOf((*MockProvider)(nil).ValidateNewSpec), arg0, arg1, arg2) } // Version mocks base method. func (m *MockProvider) Version(arg0 *cluster.Spec) string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Version", arg0) ret0, _ := ret[0].(string) return ret0 } // Version indicates an expected call of Version. func (mr *MockProviderMockRecorder) Version(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockProvider)(nil).Version), arg0) } // MockDatacenterConfig is a mock of DatacenterConfig interface. type MockDatacenterConfig struct { ctrl *gomock.Controller recorder *MockDatacenterConfigMockRecorder } // MockDatacenterConfigMockRecorder is the mock recorder for MockDatacenterConfig. type MockDatacenterConfigMockRecorder struct { mock *MockDatacenterConfig } // NewMockDatacenterConfig creates a new mock instance. 
func NewMockDatacenterConfig(ctrl *gomock.Controller) *MockDatacenterConfig { mock := &MockDatacenterConfig{ctrl: ctrl} mock.recorder = &MockDatacenterConfigMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockDatacenterConfig) EXPECT() *MockDatacenterConfigMockRecorder { return m.recorder } // ClearPauseAnnotation mocks base method. func (m *MockDatacenterConfig) ClearPauseAnnotation() { m.ctrl.T.Helper() m.ctrl.Call(m, "ClearPauseAnnotation") } // ClearPauseAnnotation indicates an expected call of ClearPauseAnnotation. func (mr *MockDatacenterConfigMockRecorder) ClearPauseAnnotation() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearPauseAnnotation", reflect.TypeOf((*MockDatacenterConfig)(nil).ClearPauseAnnotation)) } // Kind mocks base method. func (m *MockDatacenterConfig) Kind() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Kind") ret0, _ := ret[0].(string) return ret0 } // Kind indicates an expected call of Kind. func (mr *MockDatacenterConfigMockRecorder) Kind() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Kind", reflect.TypeOf((*MockDatacenterConfig)(nil).Kind)) } // Marshallable mocks base method. func (m *MockDatacenterConfig) Marshallable() v1alpha1.Marshallable { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Marshallable") ret0, _ := ret[0].(v1alpha1.Marshallable) return ret0 } // Marshallable indicates an expected call of Marshallable. func (mr *MockDatacenterConfigMockRecorder) Marshallable() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshallable", reflect.TypeOf((*MockDatacenterConfig)(nil).Marshallable)) } // PauseReconcile mocks base method. func (m *MockDatacenterConfig) PauseReconcile() { m.ctrl.T.Helper() m.ctrl.Call(m, "PauseReconcile") } // PauseReconcile indicates an expected call of PauseReconcile. func (mr *MockDatacenterConfigMockRecorder) PauseReconcile() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseReconcile", reflect.TypeOf((*MockDatacenterConfig)(nil).PauseReconcile)) } // MockMachineConfig is a mock of MachineConfig interface. type MockMachineConfig struct { ctrl *gomock.Controller recorder *MockMachineConfigMockRecorder } // MockMachineConfigMockRecorder is the mock recorder for MockMachineConfig. type MockMachineConfigMockRecorder struct { mock *MockMachineConfig } // NewMockMachineConfig creates a new mock instance. func NewMockMachineConfig(ctrl *gomock.Controller) *MockMachineConfig { mock := &MockMachineConfig{ctrl: ctrl} mock.recorder = &MockMachineConfigMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockMachineConfig) EXPECT() *MockMachineConfigMockRecorder { return m.recorder } // GetName mocks base method. func (m *MockMachineConfig) GetName() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetName") ret0, _ := ret[0].(string) return ret0 } // GetName indicates an expected call of GetName. func (mr *MockMachineConfigMockRecorder) GetName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetName", reflect.TypeOf((*MockMachineConfig)(nil).GetName)) } // GetNamespace mocks base method. 
func (m *MockMachineConfig) GetNamespace() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNamespace") ret0, _ := ret[0].(string) return ret0 } // GetNamespace indicates an expected call of GetNamespace. func (mr *MockMachineConfigMockRecorder) GetNamespace() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockMachineConfig)(nil).GetNamespace)) } // Marshallable mocks base method. func (m *MockMachineConfig) Marshallable() v1alpha1.Marshallable { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Marshallable") ret0, _ := ret[0].(v1alpha1.Marshallable) return ret0 } // Marshallable indicates an expected call of Marshallable. func (mr *MockMachineConfigMockRecorder) Marshallable() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshallable", reflect.TypeOf((*MockMachineConfig)(nil).Marshallable)) } // OSFamily mocks base method. func (m *MockMachineConfig) OSFamily() v1alpha1.OSFamily { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OSFamily") ret0, _ := ret[0].(v1alpha1.OSFamily) return ret0 } // OSFamily indicates an expected call of OSFamily. func (mr *MockMachineConfigMockRecorder) OSFamily() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OSFamily", reflect.TypeOf((*MockMachineConfig)(nil).OSFamily)) }
636
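These provider mocks follow the same gomock pattern. A short sketch (the mocks import path is an assumption) stubbing two calls, using AnyTimes for a call whose count we don't care about:

package providers_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/providers/mocks"
)

func TestProviderStub(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	provider := mocks.NewMockProvider(ctrl)
	// Name may be asked for any number of times; Version exactly once.
	provider.EXPECT().Name().Return("docker").AnyTimes()
	provider.EXPECT().Version(gomock.Any()).Return("v1.0.0")

	if provider.Name() != "docker" {
		t.Fatal("unexpected provider name")
	}
	if provider.Version(nil) != "v1.0.0" {
		t.Fatal("unexpected provider version")
	}
}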
eks-anywhere
aws
Go
package nutanix

import (
	"context"

	v3 "github.com/nutanix-cloud-native/prism-go-client/v3"
)

// Client defines the Prism Central v3 API operations the Nutanix provider relies on.
type Client interface {
	GetSubnet(ctx context.Context, uuid string) (*v3.SubnetIntentResponse, error)
	ListSubnet(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.SubnetListIntentResponse, error)
	GetImage(ctx context.Context, uuid string) (*v3.ImageIntentResponse, error)
	ListImage(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ImageListIntentResponse, error)
	GetCluster(ctx context.Context, uuid string) (*v3.ClusterIntentResponse, error)
	ListCluster(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ClusterListIntentResponse, error)
	GetProject(ctx context.Context, uuid string) (*v3.Project, error)
	ListProject(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ProjectListResponse, error)
	GetCurrentLoggedInUser(ctx context.Context) (*v3.UserIntentResponse, error)
	ListCategories(ctx context.Context, getEntitiesRequest *v3.CategoryListMetadata) (*v3.CategoryKeyListResponse, error)
	GetCategoryKey(ctx context.Context, name string) (*v3.CategoryKeyStatus, error)
	ListCategoryValues(ctx context.Context, name string, getEntitiesRequest *v3.CategoryListMetadata) (*v3.CategoryValueListResponse, error)
	GetCategoryValue(ctx context.Context, name string, value string) (*v3.CategoryValueStatus, error)
	GetCategoryQuery(ctx context.Context, query *v3.CategoryQueryInput) (*v3.CategoryQueryResponse, error)
}
25
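As an illustration of consuming this interface, a sketch of a name-to-UUID lookup built on ListSubnet. The response field names (Entities, Spec.Name, Metadata.UUID) are assumptions about the prism-go-client v3 types:

package nutanix

import (
	"context"
	"fmt"

	v3 "github.com/nutanix-cloud-native/prism-go-client/v3"
)

// subnetUUIDByName lists subnets and returns the UUID of the first one whose
// spec name matches. Purely illustrative; production code would typically
// filter server-side via the DSMetadata argument instead of listing everything.
func subnetUUIDByName(ctx context.Context, c Client, name string) (string, error) {
	res, err := c.ListSubnet(ctx, &v3.DSMetadata{})
	if err != nil {
		return "", fmt.Errorf("listing subnets: %v", err)
	}
	for _, subnet := range res.Entities {
		if subnet == nil || subnet.Spec == nil || subnet.Spec.Name == nil || subnet.Metadata == nil {
			continue
		}
		if *subnet.Spec.Name == name && subnet.Metadata.UUID != nil {
			return *subnet.Metadata.UUID, nil
		}
	}
	return "", fmt.Errorf("subnet %q not found", name)
}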
eks-anywhere
aws
Go
package nutanix

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"net"
	"strconv"

	prismgoclient "github.com/nutanix-cloud-native/prism-go-client"
	"github.com/nutanix-cloud-native/prism-go-client/environment/credentials"
	v3 "github.com/nutanix-cloud-native/prism-go-client/v3"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

// ClientCache is a map of NutanixDatacenterConfig name to Nutanix client.
type ClientCache struct {
	clients map[string]Client
}

// NewClientCache returns a new ClientCache.
func NewClientCache() *ClientCache {
	return &ClientCache{
		clients: make(map[string]Client),
	}
}

// GetNutanixClient returns a cached Nutanix client for the given NutanixDatacenterConfig,
// creating one on the first request.
func (cb *ClientCache) GetNutanixClient(datacenterConfig *anywherev1.NutanixDatacenterConfig, creds credentials.BasicAuthCredential) (Client, error) {
	if client, ok := cb.clients[datacenterConfig.Name]; ok {
		return client, nil
	}

	clientOpts := make([]v3.ClientOption, 0)
	if datacenterConfig.Spec.AdditionalTrustBundle != "" {
		block, _ := pem.Decode([]byte(datacenterConfig.Spec.AdditionalTrustBundle))
		if block == nil {
			return nil, fmt.Errorf("unable to decode the additional trust bundle %s", datacenterConfig.Spec.AdditionalTrustBundle)
		}
		certs, err := x509.ParseCertificates(block.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to parse additional trust bundle %s: %v", datacenterConfig.Spec.AdditionalTrustBundle, err)
		}
		if len(certs) == 0 {
			return nil, fmt.Errorf("unable to extract certs from the additional trust bundle %s", datacenterConfig.Spec.AdditionalTrustBundle)
		}
		clientOpts = append(clientOpts, v3.WithCertificate(certs[0]))
	}

	endpoint := datacenterConfig.Spec.Endpoint
	port := datacenterConfig.Spec.Port
	url := net.JoinHostPort(endpoint, strconv.Itoa(port))

	nutanixCreds := prismgoclient.Credentials{
		URL:      url,
		Username: creds.PrismCentral.Username,
		Password: creds.PrismCentral.Password,
		Endpoint: endpoint,
		Port:     fmt.Sprintf("%d", port),
		Insecure: datacenterConfig.Spec.Insecure,
	}

	client, err := v3.NewV3Client(nutanixCreds, clientOpts...)
	if err != nil {
		return nil, fmt.Errorf("error creating nutanix client: %v", err)
	}

	cb.clients[datacenterConfig.Name] = client.V3
	return client.V3, nil
}
68
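A usage sketch for the cache; the credential values are illustrative. Note that the underlying map has no mutex, so the cache as written assumes single-goroutine access:

package nutanix

import (
	"fmt"

	"github.com/nutanix-cloud-native/prism-go-client/environment/credentials"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

// exampleClientCacheReuse fetches a client twice for the same datacenter
// config and checks the second call is served from the cache.
func exampleClientCacheReuse(dc *anywherev1.NutanixDatacenterConfig) error {
	cache := NewClientCache()
	creds := credentials.BasicAuthCredential{
		PrismCentral: credentials.PrismCentralBasicAuth{
			BasicAuth: credentials.BasicAuth{Username: "admin", Password: "secret"},
		},
	}

	first, err := cache.GetNutanixClient(dc, creds)
	if err != nil {
		return err
	}
	second, err := cache.GetNutanixClient(dc, creds) // served from the cache
	if err != nil {
		return err
	}
	if first != second {
		return fmt.Errorf("expected the cached client to be reused")
	}
	return nil
}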
eks-anywhere
aws
Go
package nutanix

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"sigs.k8s.io/yaml"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/constants"
)

func TestNewClientCache(t *testing.T) {
	cc := NewClientCache()

	dcConf := &anywherev1.NutanixDatacenterConfig{}
	err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpecWithTrustBundle), dcConf)
	require.NoError(t, err)

	t.Setenv(constants.EksaNutanixUsernameKey, "admin")
	t.Setenv(constants.EksaNutanixPasswordKey, "password")

	c, err := cc.GetNutanixClient(dcConf, GetCredsFromEnv())
	assert.NoError(t, err)
	assert.NotNil(t, c)
}
25
eks-anywhere
aws
Go
package nutanix

import (
	"context"
	"fmt"
	"time"

	"github.com/go-logr/logr"
	nutanixv1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/clusterapi"
	yamlcapi "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
	"github.com/aws/eks-anywhere/pkg/yamlutil"
)

// BaseControlPlane represents a CAPI Nutanix control plane.
type BaseControlPlane = clusterapi.ControlPlane[*nutanixv1.NutanixCluster, *nutanixv1.NutanixMachineTemplate]

// ControlPlane holds the Nutanix specific objects for a CAPI Nutanix control plane.
type ControlPlane struct {
	BaseControlPlane
}

// Objects returns the control plane objects associated with the Nutanix cluster.
func (p ControlPlane) Objects() []kubernetes.Object {
	o := p.BaseControlPlane.Objects()
	return o
}

// ControlPlaneBuilder defines the builder for all objects in the CAPI Nutanix control plane.
type ControlPlaneBuilder struct {
	BaseBuilder  *yamlcapi.ControlPlaneBuilder[*nutanixv1.NutanixCluster, *nutanixv1.NutanixMachineTemplate]
	ControlPlane *ControlPlane
}

// BuildFromParsed implements the base yamlcapi.BuildFromParsed and processes any additional objects for the Nutanix control plane.
func (b *ControlPlaneBuilder) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
	if err := b.BaseBuilder.BuildFromParsed(lookup); err != nil {
		return err
	}

	b.ControlPlane.BaseControlPlane = *b.BaseBuilder.ControlPlane
	return nil
}

// ControlPlaneSpec builds a nutanix ControlPlane definition based on an eks-a cluster spec.
func ControlPlaneSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*ControlPlane, error) {
	ndcs := spec.NutanixDatacenter.Spec
	machineConfigs := spec.NutanixMachineConfigs
	controlPlaneMachineSpec, etcdMachineSpec := getControlPlaneMachineSpecs(machineConfigs, &spec.Cluster.Spec.ControlPlaneConfiguration, spec.Cluster.Spec.ExternalEtcdConfiguration)
	for _, machineConfig := range machineConfigs {
		machineConfig.SetDefaults()
	}

	creds := GetCredsFromEnv()
	templateBuilder := NewNutanixTemplateBuilder(&ndcs, controlPlaneMachineSpec, etcdMachineSpec, nil, creds, time.Now)

	controlPlaneYaml, err := generateControlPlaneYAML(templateBuilder, spec)
	if err != nil {
		return nil, err
	}

	cp, err := parseControlPlaneYAML(logger, controlPlaneYaml)
	if err != nil {
		return nil, err
	}

	if err := cp.UpdateImmutableObjectNames(ctx, client, getMachineTemplate, machineTemplateEquals); err != nil {
		return nil, err
	}

	return cp, nil
}

func getControlPlaneMachineSpecs(machineConfigs map[string]*v1alpha1.NutanixMachineConfig, controlPlaneConfig *v1alpha1.ControlPlaneConfiguration, externalEtcdConfig *v1alpha1.ExternalEtcdConfiguration) (*v1alpha1.NutanixMachineConfigSpec, *v1alpha1.NutanixMachineConfigSpec) {
	var controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.NutanixMachineConfigSpec
	if controlPlaneConfig.MachineGroupRef != nil && machineConfigs[controlPlaneConfig.MachineGroupRef.Name] != nil {
		controlPlaneMachineSpec = &machineConfigs[controlPlaneConfig.MachineGroupRef.Name].Spec
	}

	if externalEtcdConfig != nil && externalEtcdConfig.MachineGroupRef != nil && machineConfigs[externalEtcdConfig.MachineGroupRef.Name] != nil {
		etcdMachineSpec = &machineConfigs[externalEtcdConfig.MachineGroupRef.Name].Spec
	}

	return controlPlaneMachineSpec, etcdMachineSpec
}

func generateControlPlaneYAML(templateBuilder *TemplateBuilder, spec *cluster.Spec) ([]byte, error) {
	return templateBuilder.GenerateCAPISpecControlPlane(
		spec,
		func(values map[string]interface{}) {
			values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster)
			values["etcdTemplateName"] = clusterapi.EtcdMachineTemplateName(spec.Cluster)
		},
	)
}

func parseControlPlaneYAML(logger logr.Logger, controlPlaneYAML []byte) (*ControlPlane, error) {
	parser, builder, err := newControlPlaneParser(logger)
	if err != nil {
		return nil, err
	}

	if err := parser.Parse(controlPlaneYAML, builder); err != nil {
		return nil, err
	}

	return builder.ControlPlane, nil
}

func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *ControlPlaneBuilder, error) {
	parser, baseBuilder, err := yamlcapi.NewControlPlaneParserAndBuilder(
		logger,
		yamlutil.NewMapping(
			"NutanixCluster",
			func() *nutanixv1.NutanixCluster {
				return &nutanixv1.NutanixCluster{}
			},
		),
		yamlutil.NewMapping(
			"NutanixMachineTemplate",
			func() *nutanixv1.NutanixMachineTemplate {
				return &nutanixv1.NutanixMachineTemplate{}
			},
		),
	)
	if err != nil {
		return nil, nil, fmt.Errorf("failed building nutanix control plane parser: %w", err)
	}

	builder := &ControlPlaneBuilder{
		BaseBuilder:  baseBuilder,
		ControlPlane: &ControlPlane{},
	}

	return parser, builder, nil
}
141
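One structural note worth calling out: BaseControlPlane pins the generic clusterapi.ControlPlane to the Nutanix CAPI types via a type alias, and ControlPlane embeds it so provider-specific objects can be layered on top without changing callers. Below is a minimal, self-contained sketch of that alias-plus-embed idiom; all names in it are illustrative, not from the repo.

package main

import "fmt"

// Generic base holding provider-specific cluster and template types.
type ControlPlane[C, M any] struct {
	Cluster          C
	MachineTemplates []M
}

// Objects flattens everything the base knows about.
func (cp ControlPlane[C, M]) Objects() []any {
	objs := []any{cp.Cluster}
	for _, m := range cp.MachineTemplates {
		objs = append(objs, m)
	}
	return objs
}

// The alias pins the generic parameters once, mirroring BaseControlPlane above.
type BaseNutanixControlPlane = ControlPlane[string, string]

// Embedding lets a provider append its own objects on top of the base set.
type NutanixControlPlane struct {
	BaseNutanixControlPlane
	Extra []any
}

func (p NutanixControlPlane) Objects() []any {
	return append(p.BaseNutanixControlPlane.Objects(), p.Extra...)
}

func main() {
	cp := NutanixControlPlane{
		BaseNutanixControlPlane: BaseNutanixControlPlane{Cluster: "cluster", MachineTemplates: []string{"mt-1"}},
		Extra:                   []any{"secret"},
	}
	fmt.Println(len(cp.Objects())) // 3
}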
eks-anywhere
aws
Go
package nutanix

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/aws/eks-anywhere/internal/test"
	"github.com/aws/eks-anywhere/pkg/constants"
)

func TestControlPlaneSpec(t *testing.T) {
	t.Setenv(constants.EksaNutanixUsernameKey, "admin")
	t.Setenv(constants.EksaNutanixPasswordKey, "password")
	logger := test.NewNullLogger()
	client := test.NewFakeKubeClient()
	spec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml")
	cp, err := ControlPlaneSpec(context.TODO(), logger, client, spec)
	assert.NoError(t, err)
	assert.NotNil(t, cp)
}
23
eks-anywhere
aws
Go
package nutanix

import (
	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

// Defaulter implements the defaulting logic for NutanixDatacenterConfig and NutanixMachineConfig.
type Defaulter struct{}

// NewDefaulter returns a new Defaulter.
func NewDefaulter() *Defaulter {
	return &Defaulter{}
}

// SetDefaultsForDatacenterConfig sets defaults for a NutanixDatacenterConfig.
func (d *Defaulter) SetDefaultsForDatacenterConfig(dcConf anywherev1.NutanixDatacenterConfig) {
	dcConf.SetDefaults()
}

// SetDefaultsForMachineConfig sets defaults for a NutanixMachineConfig.
func (d *Defaulter) SetDefaultsForMachineConfig(machineConf anywherev1.NutanixMachineConfig) {
	machineConf.SetDefaults()
}
24
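A small usage sketch follows. Note the value (not pointer) parameters: assuming SetDefaults mutates via a pointer receiver, defaults land on the copy the method receives, so callers that need the defaulted object (as ControlPlaneSpec and NewProvider elsewhere in this package do) call SetDefaults on their own instance. The spec values here are illustrative placeholders.

package main

import (
	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/providers/nutanix"
)

func main() {
	d := nutanix.NewDefaulter()

	// Illustrative config; endpoint and port are placeholders.
	dc := anywherev1.NutanixDatacenterConfig{
		Spec: anywherev1.NutanixDatacenterConfigSpec{Endpoint: "prism.example.com", Port: 9440},
	}

	// Defaults are applied to the copy received by the method, so dc itself
	// is unchanged here; call dc.SetDefaults() directly to keep the result.
	d.SetDefaultsForDatacenterConfig(dc)
}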
eks-anywhere
aws
Go
package nutanix

import (
	"fmt"
	"os"

	"github.com/nutanix-cloud-native/prism-go-client/environment/credentials"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/constants"
)

const (
	nutanixEndpointKey = "NUTANIX_ENDPOINT"
)

var osSetenv = os.Setenv

func setupEnvVars(datacenterConfig *anywherev1.NutanixDatacenterConfig) error {
	if nutanixUsername, ok := os.LookupEnv(constants.EksaNutanixUsernameKey); ok && len(nutanixUsername) > 0 {
		if err := osSetenv(constants.NutanixUsernameKey, nutanixUsername); err != nil {
			return fmt.Errorf("unable to set %s: %v", constants.EksaNutanixUsernameKey, err)
		}
	} else {
		return fmt.Errorf("%s is not set or is empty", constants.EksaNutanixUsernameKey)
	}

	if nutanixPassword, ok := os.LookupEnv(constants.EksaNutanixPasswordKey); ok && len(nutanixPassword) > 0 {
		if err := osSetenv(constants.NutanixPasswordKey, nutanixPassword); err != nil {
			return fmt.Errorf("unable to set %s: %v", constants.EksaNutanixPasswordKey, err)
		}
	} else {
		return fmt.Errorf("%s is not set or is empty", constants.EksaNutanixPasswordKey)
	}

	if err := osSetenv(nutanixEndpointKey, datacenterConfig.Spec.Endpoint); err != nil {
		return fmt.Errorf("unable to set %s: %v", nutanixEndpointKey, err)
	}

	return nil
}

// GetCredsFromEnv returns nutanix credentials based on the environment.
func GetCredsFromEnv() credentials.BasicAuthCredential {
	username := os.Getenv(constants.EksaNutanixUsernameKey)
	password := os.Getenv(constants.EksaNutanixPasswordKey)
	return credentials.BasicAuthCredential{
		PrismCentral: credentials.PrismCentralBasicAuth{
			BasicAuth: credentials.BasicAuth{
				Username: username,
				Password: password,
			},
		},
	}
}
55
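For reference, a minimal sketch of the round trip this file supports: export the EKSA-prefixed variables, then read them back as Prism credentials. The literal values are placeholders.

package main

import (
	"fmt"
	"os"

	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/providers/nutanix"
)

func main() {
	// Placeholder credentials; in real use these come from the caller's shell.
	os.Setenv(constants.EksaNutanixUsernameKey, "admin")
	os.Setenv(constants.EksaNutanixPasswordKey, "password")

	// GetCredsFromEnv reads the EKSA_NUTANIX_* variables set above.
	creds := nutanix.GetCredsFromEnv()
	fmt.Println(creds.PrismCentral.BasicAuth.Username) // "admin"
}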
eks-anywhere
aws
Go
package nutanix

import (
	"errors"
	"os"
	"testing"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/constants"
)

func fakeOSSetenv(key string, value string) error {
	return errors.New("os.Setenv failed")
}

func restoreOSSetenv(replace func(key string, value string) error) {
	osSetenv = replace
}

func TestSetupEnvVarsErrorDatacenter(t *testing.T) {
	config := &v1alpha1.NutanixDatacenterConfig{
		Spec: v1alpha1.NutanixDatacenterConfigSpec{
			Endpoint: "test",
			Insecure: false,
			Port:     9440,
		},
	}

	os.Clearenv()
	if err := setupEnvVars(config); err == nil {
		t.Fatalf("setupEnvVars() err = nil, want err not nil: %#v", err)
	}

	t.Setenv(constants.EksaNutanixUsernameKey, "test")
	if err := setupEnvVars(config); err == nil {
		t.Fatalf("setupEnvVars() err = nil, want err not nil: %#v", err)
	}
}

func TestSetupEnvVarsErrorDatacenterSetenvFailures(t *testing.T) {
	storedOSSetenv := osSetenv
	osSetenv = fakeOSSetenv
	defer restoreOSSetenv(storedOSSetenv)

	config := &v1alpha1.NutanixDatacenterConfig{
		Spec: v1alpha1.NutanixDatacenterConfigSpec{
			Endpoint: "test",
			Insecure: false,
			Port:     9440,
		},
	}

	t.Setenv(constants.EksaNutanixUsernameKey, "test")
	t.Setenv(constants.EksaNutanixPasswordKey, "test")
	if err := setupEnvVars(config); err == nil {
		t.Fatalf("setupEnvVars() err = nil, want err not nil: %#v", err)
	}
}
59
eks-anywhere
aws
Go
package nutanix

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/executables"
	"github.com/aws/eks-anywhere/pkg/types"
)

type ProviderKubectlClient interface {
	ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
	SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error
	GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
	GetEksaNutanixDatacenterConfig(ctx context.Context, nutanixDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.NutanixDatacenterConfig, error)
	GetEksaNutanixMachineConfig(ctx context.Context, nutanixMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.NutanixMachineConfig, error)
	GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*kubeadmv1beta1.KubeadmControlPlane, error)
	GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
	SearchNutanixMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.NutanixMachineConfig, error)
	SearchNutanixDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.NutanixDatacenterConfig, error)
	DeleteEksaNutanixDatacenterConfig(ctx context.Context, nutanixDatacenterConfigName string, kubeconfigFile string, namespace string) error
	DeleteEksaNutanixMachineConfig(ctx context.Context, nutanixMachineConfigName string, kubeconfigFile string, namespace string) error
}
27
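This interface is satisfied by the kubectl executable in the tests further below (a *executables.Kubectl is passed straight into NewProvider), so a compile-time assertion inside the package is a cheap guard against the two drifting apart; a one-line sketch:

// Sketch: compile-time check that the concrete kubectl wrapper satisfies the
// interface. If a method signature ever drifts, this declaration fails to
// compile instead of the mismatch surfacing at runtime.
var _ ProviderKubectlClient = (*executables.Kubectl)(nil)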
eks-anywhere
aws
Go
package nutanix

import (
	"context"
	_ "embed"
	"fmt"
	"net/http"
	"os"
	"reflect"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/bootstrapper"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/crypto"
	"github.com/aws/eks-anywhere/pkg/executables"
	"github.com/aws/eks-anywhere/pkg/filewriter"
	"github.com/aws/eks-anywhere/pkg/logger"
	"github.com/aws/eks-anywhere/pkg/providers"
	"github.com/aws/eks-anywhere/pkg/providers/common"
	"github.com/aws/eks-anywhere/pkg/templater"
	"github.com/aws/eks-anywhere/pkg/types"
	releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

//go:embed config/cp-template.yaml
var defaultCAPIConfigCP string

//go:embed config/md-template.yaml
var defaultClusterConfigMD string

//go:embed config/secret-template.yaml
var secretTemplate string

//go:embed config/machine-health-check-template.yaml
var mhcTemplate []byte

var (
	eksaNutanixDatacenterResourceType = fmt.Sprintf("nutanixdatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
	eksaNutanixMachineResourceType    = fmt.Sprintf("nutanixmachineconfigs.%s", v1alpha1.GroupVersion.Group)

	// list of env variables required by CAPX to be present and defined beforehand.
	requiredEnvs = []string{nutanixEndpointKey, constants.NutanixUsernameKey, constants.NutanixPasswordKey}
)

// Provider implements the Nutanix Provider.
type Provider struct {
	clusterConfig    *v1alpha1.Cluster
	datacenterConfig *v1alpha1.NutanixDatacenterConfig
	machineConfigs   map[string]*v1alpha1.NutanixMachineConfig
	templateBuilder  *TemplateBuilder
	kubectlClient    ProviderKubectlClient
	validator        *Validator
	writer           filewriter.FileWriter
	ipValidator      IPValidator
	skipIPCheck      bool
}

var _ providers.Provider = &Provider{}

// NewProvider returns a new nutanix provider.
func NewProvider(
	datacenterConfig *v1alpha1.NutanixDatacenterConfig,
	machineConfigs map[string]*v1alpha1.NutanixMachineConfig,
	clusterConfig *v1alpha1.Cluster,
	providerKubectlClient ProviderKubectlClient,
	writer filewriter.FileWriter,
	clientCache *ClientCache,
	ipValidator IPValidator,
	certValidator crypto.TlsValidator,
	httpClient *http.Client,
	now types.NowFunc,
	skipIPCheck bool,
) *Provider {
	datacenterConfig.SetDefaults()
	for _, machineConfig := range machineConfigs {
		machineConfig.SetDefaults()
	}

	var controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.NutanixMachineConfigSpec
	if clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil {
		controlPlaneMachineSpec = &machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec
	}

	if clusterConfig.Spec.ExternalEtcdConfiguration != nil {
		if clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name] != nil {
			etcdMachineSpec = &machineConfigs[clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec
		}
	}

	creds := GetCredsFromEnv()
	workerNodeGroupMachineSpecs := make(map[string]v1alpha1.NutanixMachineConfigSpec, len(machineConfigs))
	templateBuilder := NewNutanixTemplateBuilder(&datacenterConfig.Spec, controlPlaneMachineSpec, etcdMachineSpec, workerNodeGroupMachineSpecs, creds, now)

	nutanixValidator := NewValidator(clientCache, certValidator, httpClient)

	return &Provider{
		clusterConfig:    clusterConfig,
		datacenterConfig: datacenterConfig,
		machineConfigs:   machineConfigs,
		templateBuilder:  templateBuilder,
		kubectlClient:    providerKubectlClient,
		validator:        nutanixValidator,
		writer:           writer,
		ipValidator:      ipValidator,
		skipIPCheck:      skipIPCheck,
	}
}

func (p *Provider) BootstrapClusterOpts(_ *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) {
	// TODO(nutanix): figure out if we need something else here
	return nil, nil
}

func (p *Provider) BootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) PostBootstrapDeleteForUpgrade(ctx context.Context) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) PostWorkloadInit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) Name() string {
	return constants.NutanixProviderName
}

func (p *Provider) DatacenterResourceType() string {
	return eksaNutanixDatacenterResourceType
}

func (p *Provider) MachineResourceType() string {
	return eksaNutanixMachineResourceType
}

func (p *Provider) generateSSHKeysIfNotSet() error {
	var generatedKey string
	for _, machineConfig := range p.machineConfigs {
		user := machineConfig.Spec.Users[0]
		if user.SshAuthorizedKeys[0] == "" {
			if generatedKey != "" { // use the same key
				user.SshAuthorizedKeys[0] = generatedKey
			} else {
				logger.Info("Provided sshAuthorizedKey is not set or is empty, auto-generating new key pair...", "NutanixMachineConfig", machineConfig.Name)
				var err error
				generatedKey, err = common.GenerateSSHAuthKey(p.writer)
				if err != nil {
					return err
				}
				user.SshAuthorizedKeys[0] = generatedKey
			}
		}
	}
	return nil
}

func (p *Provider) DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error {
	for _, mc := range p.machineConfigs {
		if err := p.kubectlClient.DeleteEksaNutanixMachineConfig(ctx, mc.Name, clusterSpec.ManagementCluster.KubeconfigFile, mc.Namespace); err != nil {
			return err
		}
	}
	return p.kubectlClient.DeleteEksaNutanixDatacenterConfig(ctx, clusterSpec.NutanixDatacenter.Name, clusterSpec.ManagementCluster.KubeconfigFile, clusterSpec.NutanixDatacenter.Namespace)
}

func (p *Provider) PostClusterDeleteValidate(ctx context.Context, managementCluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error {
	if err := p.validator.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	if err := setupEnvVars(clusterSpec.NutanixDatacenter); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}

	creds := GetCredsFromEnv()
	if err := p.validator.ValidateClusterSpec(ctx, clusterSpec, creds); err != nil {
		return fmt.Errorf("failed to validate cluster spec: %v", err)
	}

	if err := p.generateSSHKeysIfNotSet(); err != nil {
		return fmt.Errorf("failed to generate ssh key: %v", err)
	}

	if !p.skipIPCheck {
		if err := p.ipValidator.ValidateControlPlaneIPUniqueness(clusterSpec.Cluster); err != nil {
			return err
		}
	} else {
		logger.Info("Skipping check for whether control plane ip is in use")
	}

	return nil
}

func (p *Provider) SetupAndValidateDeleteCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	if err := p.validator.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	if err := setupEnvVars(p.datacenterConfig); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	return nil
}

// SetupAndValidateUpgradeCluster - Performs necessary setup and validations for upgrade cluster operation.
func (p *Provider) SetupAndValidateUpgradeCluster(ctx context.Context, _ *types.Cluster, clusterSpec *cluster.Spec, _ *cluster.Spec) error {
	if err := p.validator.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	// TODO(nutanix): Add validations when this is supported
	if err := setupEnvVars(p.datacenterConfig); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	return nil
}

func (p *Provider) UpdateSecrets(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	// check if CAPI Secret name and EKS-A Secret name are not the same
	// this is to ensure that the EKS-A Secret that is watched and CAPX Secret that is reconciled are not the same
	if CAPXSecretName(clusterSpec) == EKSASecretName(clusterSpec) {
		return fmt.Errorf("NutanixDatacenterConfig CredentialRef name cannot be the same as the NutanixCluster CredentialRef name")
	}

	capxSecretContents, err := p.templateBuilder.GenerateCAPISpecSecret(clusterSpec)
	if err != nil {
		return err
	}
	if err := p.kubectlClient.ApplyKubeSpecFromBytes(ctx, cluster, capxSecretContents); err != nil {
		return fmt.Errorf("loading secrets object: %v", err)
	}

	eksaSecretContents, err := p.templateBuilder.GenerateEKSASpecSecret(clusterSpec)
	if err != nil {
		return err
	}
	if err := p.kubectlClient.ApplyKubeSpecFromBytes(ctx, cluster, eksaSecretContents); err != nil {
		return fmt.Errorf("loading secrets object: %v", err)
	}

	return nil
}

func (p *Provider) GenerateCAPISpecForCreate(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
	if err := p.UpdateSecrets(ctx, cluster, clusterSpec); err != nil {
		return nil, nil, fmt.Errorf("updating Nutanix credentials: %v", err)
	}

	clusterName := clusterSpec.Cluster.Name

	cpOpt := func(values map[string]interface{}) {
		values["controlPlaneTemplateName"] = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
		values["etcdTemplateName"] = common.EtcdMachineTemplateName(clusterName, p.templateBuilder.now)
	}
	controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(clusterSpec, cpOpt)
	if err != nil {
		return nil, nil, err
	}

	workloadTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
	kubeadmconfigTemplateNames := make(map[string]string, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
	for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
		workloadTemplateNames[workerNodeGroupConfiguration.Name] = common.WorkerMachineTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
		kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = common.KubeadmConfigTemplateName(clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
		p.templateBuilder.workerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name] = p.machineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name].Spec
	}
	workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(clusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
	if err != nil {
		return nil, nil, err
	}
	return controlPlaneSpec, workersSpec, nil
}

func NeedsNewControlPlaneTemplate(oldSpec, newSpec *cluster.Spec, oldNmc, newNmc *v1alpha1.NutanixMachineConfig) bool {
	// Another option is to generate MachineTemplates based on the old and new eksa spec,
	// remove the name field and compare them with DeepEqual
	// We plan to approach this way since it's more flexible to add/remove fields and test out for validation
	if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
		return true
	}
	if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
		return true
	}
	return AnyImmutableFieldChanged(oldNmc, newNmc)
}

func nutanixIdentifierChanged(old, new v1alpha1.NutanixResourceIdentifier) bool {
	if old.Type != new.Type {
		return true
	}
	if old.Type == v1alpha1.NutanixIdentifierName && old.Name != nil && new.Name != nil && *old.Name != *new.Name {
		return true
	}
	if old.Type == v1alpha1.NutanixIdentifierUUID && old.UUID != nil && new.UUID != nil && *old.UUID != *new.UUID {
		return true
	}
	return false
}

func AnyImmutableFieldChanged(oldNmc, newNmc *v1alpha1.NutanixMachineConfig) bool {
	if oldNmc.Spec.MemorySize != newNmc.Spec.MemorySize {
		return true
	}
	if oldNmc.Spec.SystemDiskSize != newNmc.Spec.SystemDiskSize {
		return true
	}
	if oldNmc.Spec.VCPUSockets != newNmc.Spec.VCPUSockets {
		return true
	}
	if oldNmc.Spec.VCPUsPerSocket != newNmc.Spec.VCPUsPerSocket {
		return true
	}
	if oldNmc.Spec.OSFamily != newNmc.Spec.OSFamily {
		return true
	}
	if nutanixIdentifierChanged(oldNmc.Spec.Image, newNmc.Spec.Image) {
		return true
	}
	if nutanixIdentifierChanged(oldNmc.Spec.Cluster, newNmc.Spec.Cluster) {
		return true
	}
	if nutanixIdentifierChanged(oldNmc.Spec.Subnet, newNmc.Spec.Subnet) {
		return true
	}
	return false
}

func (p *Provider) getWorkerNodeMachineConfigs(ctx context.Context, workloadCluster *types.Cluster, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration) (*v1alpha1.NutanixMachineConfig, *v1alpha1.NutanixMachineConfig, error) {
	if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
		oldWorkerMachineConfig := p.machineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name]
		newWorkerMachineConfig, err := p.kubectlClient.GetEksaNutanixMachineConfig(ctx, workerNodeGroupConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
		if err != nil {
			return oldWorkerMachineConfig, nil, err
		}
		return oldWorkerMachineConfig, newWorkerMachineConfig, nil
	}
	return nil, nil, nil
}

func (p *Provider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, ndc *v1alpha1.NutanixDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerMachineConfig *v1alpha1.NutanixMachineConfig, newWorkerMachineConfig *v1alpha1.NutanixMachineConfig) (bool, error) {
	if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
		needsNewWorkloadTemplate := NeedsNewWorkloadTemplate(currentSpec, newClusterSpec, oldWorkerMachineConfig, newWorkerMachineConfig)
		return needsNewWorkloadTemplate, nil
	}
	return true, nil
}

func (p *Provider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeNmc *v1alpha1.NutanixMachineConfig, newWorkerNodeNmc *v1alpha1.NutanixMachineConfig) (bool, error) {
	if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
		existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]
		return NeedsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig, oldWorkerNodeNmc, newWorkerNodeNmc), nil
	}
	return true, nil
}

func NeedsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec, oldNmc, newNmc *v1alpha1.NutanixMachineConfig) bool {
	if oldSpec.Cluster.Spec.KubernetesVersion != newSpec.Cluster.Spec.KubernetesVersion {
		return true
	}
	if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number {
		return true
	}
	if !v1alpha1.WorkerNodeGroupConfigurationSliceTaintsEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) ||
		!v1alpha1.WorkerNodeGroupConfigurationsLabelsMapEqual(oldSpec.Cluster.Spec.WorkerNodeGroupConfigurations, newSpec.Cluster.Spec.WorkerNodeGroupConfigurations) {
		return true
	}
	return AnyImmutableFieldChanged(oldNmc, newNmc)
}

func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeNmc *v1alpha1.NutanixMachineConfig, newWorkerNodeNmc *v1alpha1.NutanixMachineConfig) bool {
	return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) ||
		!v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels) ||
		!v1alpha1.UsersSliceEqual(oldWorkerNodeNmc.Spec.Users, newWorkerNodeNmc.Spec.Users)
}

func (p *Provider) GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
	if err := p.UpdateSecrets(ctx, bootstrapCluster, newClusterSpec); err != nil {
		return nil, nil, fmt.Errorf("updating Nutanix credentials: %v", err)
	}

	clusterName := newClusterSpec.Cluster.Name
	var controlPlaneTemplateName, workloadTemplateName, kubeadmconfigTemplateName, etcdTemplateName string

	// Get existing EKSA Cluster
	eksaCluster, err := p.kubectlClient.GetEksaCluster(ctx, workloadCluster, newClusterSpec.Cluster.Name)
	if err != nil {
		return nil, nil, err
	}

	// Get current Nutanix Datacenter Config
	ndc, err := p.kubectlClient.GetEksaNutanixDatacenterConfig(ctx, p.datacenterConfig.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
	if err != nil {
		return nil, nil, err
	}

	// Get current Nutanix Machine Config
	controlPlaneMachineConfig := p.machineConfigs[newClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
	controlPlaneNutanixMachineConfig, err := p.kubectlClient.GetEksaNutanixMachineConfig(ctx, eksaCluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, workloadCluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
	if err != nil {
		return nil, nil, err
	}

	needsNewControlPlaneTemplate := NeedsNewControlPlaneTemplate(currentSpec, newClusterSpec, controlPlaneNutanixMachineConfig, controlPlaneMachineConfig)
	if !needsNewControlPlaneTemplate {
		cp, err := p.kubectlClient.GetKubeadmControlPlane(ctx, workloadCluster, eksaCluster.Name, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
		if err != nil {
			return nil, nil, err
		}
		controlPlaneTemplateName = cp.Spec.MachineTemplate.InfrastructureRef.Name
	} else {
		controlPlaneTemplateName = common.CPMachineTemplateName(clusterName, p.templateBuilder.now)
	}

	previousWorkerNodeGroupConfigs := cluster.BuildMapForWorkerNodeGroupsByName(currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations)

	workloadTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
	kubeadmconfigTemplateNames := make(map[string]string, len(newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
	for _, workerNodeGroupConfiguration := range newClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
		oldWorkerNodeNmc, newWorkerNodeNmc, err := p.getWorkerNodeMachineConfigs(ctx, workloadCluster, newClusterSpec, workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs)
		if err != nil {
			return nil, nil, err
		}
		needsNewWorkloadTemplate, err := p.needsNewMachineTemplate(currentSpec, newClusterSpec, workerNodeGroupConfiguration, ndc, previousWorkerNodeGroupConfigs, oldWorkerNodeNmc, newWorkerNodeNmc)
		if err != nil {
			return nil, nil, err
		}
		needsNewKubeadmConfigTemplate, err := p.needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration, previousWorkerNodeGroupConfigs, oldWorkerNodeNmc, newWorkerNodeNmc)
		if err != nil {
			return nil, nil, err
		}
		if !needsNewKubeadmConfigTemplate {
			mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
			md, err := p.kubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
			if err != nil {
				return nil, nil, err
			}
			kubeadmconfigTemplateName = md.Spec.Template.Spec.Bootstrap.ConfigRef.Name
			kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
		} else {
			kubeadmconfigTemplateName = common.KubeadmConfigTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
			kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = kubeadmconfigTemplateName
		}

		if !needsNewWorkloadTemplate {
			mdName := machineDeploymentName(newClusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name)
			md, err := p.kubectlClient.GetMachineDeployment(ctx, mdName, executables.WithCluster(bootstrapCluster), executables.WithNamespace(constants.EksaSystemNamespace))
			if err != nil {
				return nil, nil, err
			}
			workloadTemplateName = md.Spec.Template.Spec.InfrastructureRef.Name
			workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
		} else {
			workloadTemplateName = common.WorkerMachineTemplateName(clusterName, workerNodeGroupConfiguration.Name, p.templateBuilder.now)
			workloadTemplateNames[workerNodeGroupConfiguration.Name] = workloadTemplateName
		}
		p.templateBuilder.workerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name] = p.machineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name].Spec
	}

	cpOpt := func(values map[string]interface{}) {
		values["controlPlaneTemplateName"] = controlPlaneTemplateName
		values["etcdTemplateName"] = etcdTemplateName
	}
	controlPlaneSpec, err = p.templateBuilder.GenerateCAPISpecControlPlane(newClusterSpec, cpOpt)
	if err != nil {
		return nil, nil, err
	}
	workersSpec, err = p.templateBuilder.GenerateCAPISpecWorkers(newClusterSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
	if err != nil {
		return nil, nil, err
	}
	return controlPlaneSpec, workersSpec, nil
}

func (p *Provider) GenerateStorageClass() []byte {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) GenerateMHC(_ *cluster.Spec) ([]byte, error) {
	data := map[string]string{
		"clusterName":         p.clusterConfig.Name,
		"eksaSystemNamespace": constants.EksaSystemNamespace,
	}
	mhc, err := templater.Execute(string(mhcTemplate), data)
	if err != nil {
		return nil, err
	}
	return mhc, nil
}

func (p *Provider) UpdateKubeConfig(content *[]byte, clusterName string) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) Version(clusterSpec *cluster.Spec) string {
	return clusterSpec.VersionsBundle.Nutanix.Version
}

func (p *Provider) EnvMap(_ *cluster.Spec) (map[string]string, error) {
	// TODO(nutanix): determine if any env vars are needed and add them to requiredEnvs
	envMap := make(map[string]string)
	for _, key := range requiredEnvs {
		if env, ok := os.LookupEnv(key); ok && len(env) > 0 {
			envMap[key] = env
		} else {
			return nil, fmt.Errorf("required env not set %s", key)
		}
	}
	return envMap, nil
}

func (p *Provider) GetDeployments() map[string][]string {
	return map[string][]string{
		"capx-system": {"capx-controller-manager"},
	}
}

func (p *Provider) GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle {
	bundle := clusterSpec.VersionsBundle
	manifests := []releasev1alpha1.Manifest{
		bundle.Nutanix.Components,
		bundle.Nutanix.Metadata,
		bundle.Nutanix.ClusterTemplate,
	}
	folderName := fmt.Sprintf("infrastructure-nutanix/%s/", p.Version(clusterSpec))
	infraBundle := types.InfrastructureBundle{
		FolderName: folderName,
		Manifests:  manifests,
	}
	return &infraBundle
}

func (p *Provider) DatacenterConfig(_ *cluster.Spec) providers.DatacenterConfig {
	return p.datacenterConfig
}

func (p *Provider) MachineConfigs(_ *cluster.Spec) []providers.MachineConfig {
	configs := make(map[string]providers.MachineConfig, len(p.machineConfigs))
	controlPlaneMachineName := p.clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name
	p.machineConfigs[controlPlaneMachineName].Annotations = map[string]string{p.clusterConfig.ControlPlaneAnnotation(): "true"}
	if p.clusterConfig.IsManaged() {
		p.machineConfigs[controlPlaneMachineName].SetManagedBy(p.clusterConfig.ManagedBy())
	}
	configs[controlPlaneMachineName] = p.machineConfigs[controlPlaneMachineName]

	if p.clusterConfig.Spec.ExternalEtcdConfiguration != nil {
		etcdMachineName := p.clusterConfig.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name
		p.machineConfigs[etcdMachineName].Annotations = map[string]string{p.clusterConfig.EtcdAnnotation(): "true"}
		if etcdMachineName != controlPlaneMachineName {
			configs[etcdMachineName] = p.machineConfigs[etcdMachineName]
			if p.clusterConfig.IsManaged() {
				p.machineConfigs[etcdMachineName].SetManagedBy(p.clusterConfig.ManagedBy())
			}
		}
	}

	for _, workerNodeGroupConfiguration := range p.clusterConfig.Spec.WorkerNodeGroupConfigurations {
		workerMachineName := workerNodeGroupConfiguration.MachineGroupRef.Name
		if _, ok := configs[workerMachineName]; !ok {
			configs[workerMachineName] = p.machineConfigs[workerMachineName]
			if p.clusterConfig.IsManaged() {
				p.machineConfigs[workerMachineName].SetManagedBy(p.clusterConfig.ManagedBy())
			}
		}
	}
	return configsMapToSlice(configs)
}

func configsMapToSlice(c map[string]providers.MachineConfig) []providers.MachineConfig {
	configs := make([]providers.MachineConfig, 0, len(c))
	for _, config := range c {
		configs = append(configs, config)
	}
	return configs
}

func (p *Provider) ValidateNewSpec(_ context.Context, _ *types.Cluster, _ *cluster.Spec) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff {
	if currentSpec.VersionsBundle.Nutanix.Version == newSpec.VersionsBundle.Nutanix.Version {
		return nil
	}
	return &types.ComponentChangeDiff{
		ComponentName: constants.NutanixProviderName,
		NewVersion:    newSpec.VersionsBundle.Nutanix.Version,
		OldVersion:    currentSpec.VersionsBundle.Nutanix.Version,
	}
}

func (p *Provider) RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpec *cluster.Spec, clusterSpec *cluster.Spec, workloadCluster *types.Cluster, managementCluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) UpgradeNeeded(ctx context.Context, newSpec, currentSpec *cluster.Spec, cluster *types.Cluster) (bool, error) {
	cc := currentSpec.Cluster
	existingVdc, err := p.kubectlClient.GetEksaNutanixDatacenterConfig(ctx, cc.Spec.DatacenterRef.Name, cluster.KubeconfigFile, newSpec.Cluster.Namespace)
	if err != nil {
		return false, err
	}
	if !reflect.DeepEqual(existingVdc.Spec, p.datacenterConfig.Spec) {
		logger.V(3).Info("New provider spec is different from the existing spec")
		return true, nil
	}

	machineConfigsSpecChanged, err := p.machineConfigsSpecChanged(ctx, cc, cluster, newSpec)
	if err != nil {
		return false, err
	}
	return machineConfigsSpecChanged, nil
}

func (p *Provider) machineConfigsSpecChanged(ctx context.Context, cc *v1alpha1.Cluster, cluster *types.Cluster, newClusterSpec *cluster.Spec) (bool, error) {
	machineConfigMap := make(map[string]*v1alpha1.NutanixMachineConfig)
	for _, config := range p.MachineConfigs(nil) {
		mc := config.(*v1alpha1.NutanixMachineConfig)
		machineConfigMap[mc.Name] = mc
	}

	for _, oldMcRef := range cc.MachineConfigRefs() {
		existingVmc, err := p.kubectlClient.GetEksaNutanixMachineConfig(ctx, oldMcRef.Name, cluster.KubeconfigFile, newClusterSpec.Cluster.Namespace)
		if err != nil {
			return false, err
		}
		csmc, ok := machineConfigMap[oldMcRef.Name]
		if !ok {
			logger.V(3).Info(fmt.Sprintf("Old machine config spec %s not found in the existing spec", oldMcRef.Name))
			return true, nil
		}
		if !reflect.DeepEqual(existingVmc.Spec, csmc.Spec) {
			logger.V(3).Info(fmt.Sprintf("New machine config spec %s is different from the existing spec", oldMcRef.Name))
			return true, nil
		}
	}

	return false, nil
}

func (p *Provider) RunPostControlPlaneCreation(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

func (p *Provider) MachineDeploymentsToDelete(workloadCluster *types.Cluster, currentSpec, newSpec *cluster.Spec) []string {
	nodeGroupsToDelete := cluster.NodeGroupsToDelete(currentSpec, newSpec)
	machineDeployments := make([]string, 0, len(nodeGroupsToDelete))
	for _, nodeGroup := range nodeGroupsToDelete {
		mdName := machineDeploymentName(workloadCluster.Name, nodeGroup.Name)
		machineDeployments = append(machineDeployments, mdName)
	}
	return machineDeployments
}

func (p *Provider) InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error {
	return nil
}

func (p *Provider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	return nil
}

func (p *Provider) PostMoveManagementToBootstrap(ctx context.Context, bootstrapCluster *types.Cluster) error {
	// TODO(nutanix): figure out if we need something else here
	return nil
}

// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *Provider) PreCoreComponentsUpgrade(
	ctx context.Context,
	cluster *types.Cluster,
	clusterSpec *cluster.Spec,
) error {
	return nil
}
707
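To make the immutability check above concrete, here is a minimal sketch that flips one field the provider treats as immutable (VCPUSockets) and observes the answer; the field values are illustrative placeholders.

package main

import (
	"fmt"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/providers/nutanix"
)

func main() {
	// Two machine configs that differ only in an immutable field.
	old := &anywherev1.NutanixMachineConfig{
		Spec: anywherev1.NutanixMachineConfigSpec{VCPUSockets: 2},
	}
	updated := old.DeepCopy()
	updated.Spec.VCPUSockets = 4

	// Prints true: a changed immutable field means a new machine template
	// (and therefore a node rollout) is required on upgrade.
	fmt.Println(nutanix.AnyImmutableFieldChanged(old, updated))
}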
eks-anywhere
aws
Go
package nutanix import ( "bytes" "context" _ "embed" "errors" "fmt" "net/http" "os" "testing" "time" "github.com/golang/mock/gomock" "github.com/nutanix-cloud-native/prism-go-client/utils" v3 "github.com/nutanix-cloud-native/prism-go-client/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/internal/test" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/crypto" mockCrypto "github.com/aws/eks-anywhere/pkg/crypto/mocks" "github.com/aws/eks-anywhere/pkg/executables" mockexecutables "github.com/aws/eks-anywhere/pkg/executables/mocks" "github.com/aws/eks-anywhere/pkg/filewriter" filewritermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks" mocknutanix "github.com/aws/eks-anywhere/pkg/providers/nutanix/mocks" "github.com/aws/eks-anywhere/pkg/types" ) //go:embed testdata/eksa-cluster.json var nutanixClusterConfigSpecJSON string //go:embed testdata/datacenterConfig.json var nutanixDatacenterConfigSpecJSON string //go:embed testdata/machineConfig.json var nutanixMachineConfigSpecJSON string //go:embed testdata/machineDeployment.json var nutanixMachineDeploymentSpecJSON string func thenErrorExpected(t *testing.T, expected string, err error) { if err == nil { t.Fatalf("Expected=<%s> actual=<nil>", expected) } actual := err.Error() if expected != actual { t.Fatalf("Expected=<%s> actual=<%s>", expected, actual) } } func testDefaultNutanixProvider(t *testing.T) *Provider { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) return provider } func testNutanixProvider(t *testing.T, nutanixClient Client, kubectl *executables.Kubectl, certValidator crypto.TlsValidator, httpClient *http.Client, writer filewriter.FileWriter) *Provider { clusterConf := &anywherev1.Cluster{} err := yaml.Unmarshal([]byte(nutanixClusterConfigSpec), clusterConf) require.NoError(t, err) dcConf := &anywherev1.NutanixDatacenterConfig{} err = yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf) require.NoError(t, err) machineConf := &anywherev1.NutanixMachineConfig{} err = yaml.Unmarshal([]byte(nutanixMachineConfigSpec), machineConf) require.NoError(t, err) workerConfs := map[string]*anywherev1.NutanixMachineConfig{ "eksa-unit-test": machineConf, } t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") clientCache := &ClientCache{ clients: make(map[string]Client), } clientCache.clients[dcConf.Name] = nutanixClient ctrl := gomock.NewController(t) mockIPValidator := mocknutanix.NewMockIPValidator(ctrl) mockIPValidator.EXPECT().ValidateControlPlaneIPUniqueness(gomock.Any()).Return(nil).AnyTimes() provider := NewProvider(dcConf, workerConfs, clusterConf, kubectl, writer, clientCache, mockIPValidator, certValidator, httpClient, time.Now, false) require.NotNil(t, provider) return provider } func 
testNutanixProviderWithClusterSpec(t *testing.T, nutanixClient Client, kubectl *executables.Kubectl, certValidator crypto.TlsValidator, httpClient *http.Client, writer filewriter.FileWriter, clusterSpec *cluster.Spec) *Provider { t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") clientCache := &ClientCache{ clients: make(map[string]Client), } clientCache.clients[clusterSpec.NutanixDatacenter.Name] = nutanixClient ctrl := gomock.NewController(t) mockIPValidator := mocknutanix.NewMockIPValidator(ctrl) mockIPValidator.EXPECT().ValidateControlPlaneIPUniqueness(gomock.Any()).Return(nil).AnyTimes() provider := NewProvider(clusterSpec.NutanixDatacenter, clusterSpec.NutanixMachineConfigs, clusterSpec.Cluster, kubectl, writer, clientCache, mockIPValidator, certValidator, httpClient, time.Now, false) require.NotNil(t, provider) return provider } func TestNutanixProviderBootstrapClusterOpts(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") opts, err := provider.BootstrapClusterOpts(clusterSpec) assert.NoError(t, err) assert.Nil(t, opts) } func TestNutanixProviderBootstrapSetup(t *testing.T) { provider := testDefaultNutanixProvider(t) err := provider.BootstrapSetup(context.Background(), provider.clusterConfig, &types.Cluster{Name: "eksa-unit-test"}) assert.NoError(t, err) } func TestNutanixProviderPostBootstrapSetup(t *testing.T) { provider := testDefaultNutanixProvider(t) err := provider.PostBootstrapSetup(context.Background(), provider.clusterConfig, &types.Cluster{Name: "eksa-unit-test"}) assert.NoError(t, err) } func TestNutanixProviderPostBootstrapDeleteForUpgrade(t *testing.T) { provider := testDefaultNutanixProvider(t) err := provider.PostBootstrapDeleteForUpgrade(context.Background()) assert.NoError(t, err) } func TestNutanixProviderPostBootstrapSetupUpgrade(t *testing.T) { provider := testDefaultNutanixProvider(t) err := provider.PostBootstrapSetupUpgrade(context.Background(), provider.clusterConfig, &types.Cluster{Name: "eksa-unit-test"}) assert.NoError(t, err) } func TestNutanixProviderPostWorkloadInit(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") err := provider.PostWorkloadInit(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec) assert.NoError(t, err) } func TestNutanixProviderName(t *testing.T) { provider := testDefaultNutanixProvider(t) name := provider.Name() assert.Equal(t, "nutanix", name) } func TestNutanixProviderDatacenterResourceType(t *testing.T) { provider := testDefaultNutanixProvider(t) resource := provider.DatacenterResourceType() assert.Equal(t, "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", resource) } func TestNutanixProviderMachineResourceType(t *testing.T) { provider := testDefaultNutanixProvider(t) resource := provider.MachineResourceType() assert.Equal(t, "nutanixmachineconfigs.anywhere.eks.amazonaws.com", resource) } func TestNutanixProviderDeleteResources(t *testing.T) { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().Execute(gomock.Any(), "delete", []string{"nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "eksa-unit-test", "--kubeconfig", "testdata/kubeconfig.yaml", "--namespace", "default", "--ignore-not-found=true"}, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil) 
executable.EXPECT().Execute(gomock.Any(), "delete", []string{"nutanixmachineconfigs.anywhere.eks.amazonaws.com", "eksa-unit-test", "--kubeconfig", "testdata/kubeconfig.yaml", "--namespace", "default", "--ignore-not-found=true"}, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") clusterSpec.ManagementCluster = &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} err := provider.DeleteResources(context.Background(), clusterSpec) assert.NoError(t, err) } func TestNutanixProviderPostClusterDeleteValidate(t *testing.T) { provider := testDefaultNutanixProvider(t) err := provider.PostClusterDeleteValidate(context.Background(), &types.Cluster{Name: "eksa-unit-test"}) assert.NoError(t, err) } func TestNutanixProviderSetupAndValidateCreate(t *testing.T) { ctrl := gomock.NewController(t) tests := []struct { name string clusterConfFile string expectErr bool expectErrStr string }{ { name: "valid cluster config", clusterConfFile: "testdata/eksa-cluster.yaml", expectErr: false, }, { name: "valid cluster config with trust bundle", clusterConfFile: "testdata/cluster_nutanix_with_trust_bundle.yaml", expectErr: false, }, { name: "valid cluster config with invalid trust bundle", clusterConfFile: "testdata/cluster_nutanix_with_invalid_trust_bundle.yaml", expectErr: true, expectErrStr: "failed to validate cluster spec: invalid cert", }, { name: "valid cluster config with invalid pe cluster name - same as pc name", clusterConfFile: "testdata/eksa-cluster-invalid-pe-cluster-pc.yaml", expectErr: true, expectErrStr: "failed to validate cluster spec: failed to validate machine config: failed to find cluster with name \"prism-central\": failed to find cluster by name \"prism-central\": <nil>", }, { name: "valid cluster config with invalid pe cluster name - non existent pe name", clusterConfFile: "testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml", expectErr: true, expectErrStr: "failed to validate cluster spec: failed to validate machine config: failed to find cluster with name \"non-existent-cluster\": failed to find cluster by name \"non-existent-cluster\": <nil>", }, { name: "cluster config with unsupported upgrade strategy configuration for cp", clusterConfFile: "testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml", expectErr: true, expectErrStr: "failed setup and validations: Upgrade rollout strategy customization is not supported for nutanix provider", }, { name: "cluster config with unsupported upgrade strategy configuration for md", clusterConfFile: "testdata/cluster_nutanix_with_upgrade_strategy_md.yaml", expectErr: true, expectErrStr: "failed setup and validations: Upgrade rollout strategy customization is not supported for nutanix provider", }, } executable := mockexecutables.NewMockExecutable(ctrl) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) 
mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes() clusters := &v3.ClusterListIntentResponse{ Entities: []*v3.ClusterIntentResponse{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cda"), }, Spec: &v3.Cluster{ Name: utils.StringPtr("prism-cluster"), }, Status: &v3.ClusterDefStatus{ Resources: &v3.ClusterObj{ Config: &v3.ClusterConfig{ ServiceList: []*string{utils.StringPtr("AOS")}, }, }, }, }, { Metadata: &v3.Metadata{ UUID: utils.StringPtr("4692a614-85e7-4abc-9bf3-8fb0f9d790bc"), }, Spec: &v3.Cluster{ Name: utils.StringPtr("prism-central"), }, Status: &v3.ClusterDefStatus{ Resources: &v3.ClusterObj{ Config: &v3.ClusterConfig{ ServiceList: []*string{utils.StringPtr("PRISM_CENTRAL")}, }, }, }, }, { Metadata: &v3.Metadata{ UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1abc"), }, Spec: &v3.Cluster{ Name: utils.StringPtr("prism-cluster-2"), }, Status: &v3.ClusterDefStatus{ Resources: &v3.ClusterObj{ Config: &v3.ClusterConfig{ ServiceList: []*string{utils.StringPtr("AOS")}, }, }, }, }, { Metadata: &v3.Metadata{ UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1xyz"), }, Spec: &v3.Cluster{ Name: utils.StringPtr("prism-cluster-3"), }, Status: &v3.ClusterDefStatus{ Resources: &v3.ClusterObj{ Config: &v3.ClusterConfig{ ServiceList: []*string{utils.StringPtr("AOS")}, }, }, }, }, }, } mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(clusters, nil).AnyTimes() subnets := &v3.SubnetListIntentResponse{ Entities: []*v3.SubnetIntentResponse{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cdb"), }, Spec: &v3.Subnet{ Name: utils.StringPtr("prism-subnet"), }, }, }, } mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(subnets, nil).AnyTimes() images := &v3.ImageListIntentResponse{ Entities: []*v3.ImageIntentResponse{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cdc"), }, Spec: &v3.Image{ Name: utils.StringPtr("prism-image"), }, }, }, } mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(images, nil).AnyTimes() mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockCertValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) mockCertValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("invalid cert")) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) assert.NotNil(t, provider) for _, tt := range tests { clusterSpec := test.NewFullClusterSpec(t, tt.clusterConfFile) err := provider.SetupAndValidateCreateCluster(context.Background(), clusterSpec) if tt.expectErr { assert.Error(t, err, tt.name) thenErrorExpected(t, tt.expectErrStr, err) } else { assert.NoError(t, err, tt.name) } } sshKeyTests := []struct { name string clusterConfFile string expectErr bool performTest func(t *testing.T, provider *Provider, clusterSpec *cluster.Spec) error }{ { name: "validate is ssh key gets generated for cp", clusterConfFile: "testdata/eksa-cluster-multiple-machineconfigs.yaml", expectErr: false, performTest: func(t *testing.T, provider *Provider, clusterSpec *cluster.Spec) error { // Set the SSH Authorized Key to empty string 
controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name clusterSpec.NutanixMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] = "" err := provider.SetupAndValidateCreateCluster(context.Background(), clusterSpec) if err != nil { return fmt.Errorf("provider.SetupAndValidateCreateCluster() err = %v, want err = nil", err) } // Expect the SSH Authorized Key to be not empty if clusterSpec.NutanixMachineConfigs[controlPlaneMachineConfigName].Spec.Users[0].SshAuthorizedKeys[0] == "" { return fmt.Errorf("sshAuthorizedKey has not changed for control plane machine") } return nil }, }, } for _, tc := range sshKeyTests { t.Run(tc.name, func(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.clusterConfFile) // to avoid "because: there are no expected calls of the method "Write" for that receiver" // using test.NewWriter(t) instead of filewritermocks.NewMockFileWriter(ctrl) _, mockWriter := test.NewWriter(t) provider := testNutanixProviderWithClusterSpec(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter, clusterSpec) assert.NotNil(t, provider) err := tc.performTest(t, provider, clusterSpec) if tc.expectErr { if err == nil { t.Fatalf("Test failed. %s", err) } } else { if err != nil { t.Fatalf("Test failed. %s", err) } } }) } } func TestNutanixProviderSetupAndValidateDeleteCluster(t *testing.T) { provider := testDefaultNutanixProvider(t) tests := []struct { name string clusterConfFile string expectErr bool expectErrStr string }{ { name: "valid cluster config", clusterConfFile: "testdata/eksa-cluster.yaml", expectErr: false, }, { name: "cluster config with unsupported upgrade strategy configuration for cp", clusterConfFile: "testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml", expectErr: true, expectErrStr: "failed setup and validations: Upgrade rollout strategy customization is not supported for nutanix provider", }, { name: "cluster config with unsupported upgrade strategy configuration for md", clusterConfFile: "testdata/cluster_nutanix_with_upgrade_strategy_md.yaml", expectErr: true, expectErrStr: "failed setup and validations: Upgrade rollout strategy customization is not supported for nutanix provider", }, } for _, tt := range tests { clusterSpec := test.NewFullClusterSpec(t, tt.clusterConfFile) err := provider.SetupAndValidateDeleteCluster(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec) if tt.expectErr { assert.Error(t, err, tt.name) thenErrorExpected(t, tt.expectErrStr, err) } else { assert.NoError(t, err, tt.name) } } } func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { provider := testDefaultNutanixProvider(t) tests := []struct { name string clusterConfFile string expectErr bool expectErrStr string }{ { name: "valid cluster config", clusterConfFile: "testdata/eksa-cluster.yaml", expectErr: false, }, { name: "cluster config with unsupported upgrade strategy configuration for cp", clusterConfFile: "testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml", expectErr: true, expectErrStr: "failed setup and validations: Upgrade rollout strategy customization is not supported for nutanix provider", }, { name: "cluster config with unsupported upgrade strategy configuration for md", clusterConfFile: "testdata/cluster_nutanix_with_upgrade_strategy_md.yaml", expectErr: true, expectErrStr: "failed setup and validations: Upgrade rollout strategy customization is not supported for nutanix provider", }, } for _, tt := range tests { clusterSpec := 
test.NewFullClusterSpec(t, tt.clusterConfFile) err := provider.SetupAndValidateUpgradeCluster(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec, clusterSpec) if tt.expectErr { assert.Error(t, err, tt.name) thenErrorExpected(t, tt.expectErrStr, err) } else { assert.NoError(t, err, tt.name) } } } func TestNutanixProviderUpdateSecrets(t *testing.T) { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil).Times(2) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) cluster := &types.Cluster{Name: "eksa-unit-test"} clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") err := provider.UpdateSecrets(context.Background(), cluster, clusterSpec) assert.NoError(t, err) storedMarshal := jsonMarshal jsonMarshal = fakemarshal err = provider.UpdateSecrets(context.Background(), cluster, clusterSpec) assert.ErrorContains(t, err, "marshalling failed") restoremarshal(storedMarshal) clusterSpec.NutanixDatacenter.Spec.CredentialRef.Name = "capx-eksa-unit-test" err = provider.UpdateSecrets(context.Background(), cluster, clusterSpec) assert.ErrorContains(t, err, "NutanixDatacenterConfig CredentialRef name cannot be the same as the NutanixCluster CredentialRef name") } func TestNutanixProviderGenerateCAPISpecForCreate(t *testing.T) { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil).Times(2) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) cluster := &types.Cluster{Name: "eksa-unit-test"} clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cpSpec, workerSpec, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) assert.NotNil(t, workerSpec) } func TestNutanixProviderGenerateCAPISpecForCreate_Error(t *testing.T) { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, errors.New("test error")) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := 
filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) cluster := &types.Cluster{Name: "eksa-unit-test"} clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cpSpec, workerSpec, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec) assert.EqualError(t, err, "updating Nutanix credentials: loading secrets object: executing apply: test error") assert.Nil(t, cpSpec) assert.Nil(t, workerSpec) } func TestNutanixProviderGenerateCAPISpecForUpgrade(t *testing.T) { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil).Times(2) executable.EXPECT().Execute(gomock.Any(), "get", "clusters.anywhere.eks.amazonaws.com", "-A", "-o", "jsonpath={.items[0]}", "--kubeconfig", "testdata/kubeconfig.yaml", "--field-selector=metadata.name=eksa-unit-test").Return(*bytes.NewBufferString(nutanixClusterConfigSpecJSON), nil) executable.EXPECT().Execute(gomock.Any(), "get", "--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixmachineconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test").Return(*bytes.NewBufferString(nutanixMachineConfigSpecJSON), nil).AnyTimes() executable.EXPECT().Execute(gomock.Any(), "get", "--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test").Return(*bytes.NewBufferString(nutanixDatacenterConfigSpecJSON), nil) executable.EXPECT().Execute(gomock.Any(), "get", "machinedeployments.cluster.x-k8s.io", "eksa-unit-test-eksa-unit-test", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "--namespace", "eksa-system").Return(*bytes.NewBufferString(nutanixMachineDeploymentSpecJSON), nil).Times(2) executable.EXPECT().Execute(gomock.Any(), "get", "kubeadmcontrolplanes.controlplane.cluster.x-k8s.io", "eksa-unit-test", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "--namespace", "eksa-system").Return(*bytes.NewBufferString(nutanixMachineDeploymentSpecJSON), nil) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) cluster := &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cpSpec, workerSpec, err := provider.GenerateCAPISpecForUpgrade(context.Background(), cluster, cluster, clusterSpec, clusterSpec) assert.NoError(t, err) assert.NotEmpty(t, cpSpec) assert.NotEmpty(t, workerSpec) } func TestNutanixProviderGenerateCAPISpecForUpgrade_Error(t *testing.T) { ctrl := gomock.NewController(t) executable := mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, errors.New("test error")) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := 
mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) cluster := &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cpSpec, workerSpec, err := provider.GenerateCAPISpecForUpgrade(context.Background(), cluster, cluster, clusterSpec, clusterSpec) assert.EqualError(t, err, "updating Nutanix credentials: loading secrets object: executing apply: test error") assert.Nil(t, cpSpec) assert.Nil(t, workerSpec) } func TestNeedsNewControlPlaneTemplate(t *testing.T) { tests := []struct { name string newClusterSpec func(spec cluster.Spec) cluster.Spec newMachineConfig func(anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig expectedResult bool }{ { name: "kubernetes version changed", newClusterSpec: func(spec cluster.Spec) cluster.Spec { s := spec.DeepCopy() s.Cluster.Spec.KubernetesVersion = "1.21.2" return *s }, newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { return spec }, expectedResult: true, }, { name: "bundle spec number changed", newClusterSpec: func(spec cluster.Spec) cluster.Spec { s := spec.DeepCopy() s.Bundles.Spec.Number = 42 return *s }, newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { return spec }, expectedResult: true, }, } for _, tt := range tests { oldClusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") newClusterSpec := tt.newClusterSpec(*oldClusterSpec) oldMachineConf := &anywherev1.NutanixMachineConfig{} err := yaml.Unmarshal([]byte(nutanixMachineConfigSpec), oldMachineConf) require.NoError(t, err) newMachineConf := tt.newMachineConfig(*oldMachineConf) assert.Equal(t, tt.expectedResult, NeedsNewControlPlaneTemplate(oldClusterSpec, &newClusterSpec, oldMachineConf, &newMachineConf)) } } func TestNeedsNewWorkloadTemplate(t *testing.T) { tests := []struct { name string newClusterSpec func(spec cluster.Spec) cluster.Spec newMachineConfig func(anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig expectedResult bool }{ { name: "kubernetes version changed", newClusterSpec: func(spec cluster.Spec) cluster.Spec { s := spec.DeepCopy() s.Cluster.Spec.KubernetesVersion = "1.21.2" return *s }, newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { return spec }, expectedResult: true, }, { name: "bundle spec number changed", newClusterSpec: func(spec cluster.Spec) cluster.Spec { s := spec.DeepCopy() s.Bundles.Spec.Number = 42 return *s }, newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { return spec }, expectedResult: true, }, { name: "worker node config labels changed", newClusterSpec: func(spec cluster.Spec) cluster.Spec { s := spec.DeepCopy() s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Labels = map[string]string{"foo": "bar"} return *s }, newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { return spec }, expectedResult: true, }, } for _, tt := range tests { oldClusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") newClusterSpec := tt.newClusterSpec(*oldClusterSpec) 
oldMachineConf := &anywherev1.NutanixMachineConfig{} err := yaml.Unmarshal([]byte(nutanixMachineConfigSpec), oldMachineConf) require.NoError(t, err) newMachineConf := tt.newMachineConfig(*oldMachineConf) assert.Equal(t, tt.expectedResult, NeedsNewWorkloadTemplate(oldClusterSpec, &newClusterSpec, oldMachineConf, &newMachineConf)) } } func TestAnyImmutableFieldChanged(t *testing.T) { tests := []struct { name string newMachineConfig func(anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig expectedResult bool }{ { name: "machine image changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.Image.Name = utils.StringPtr("new-image") return *conf }, expectedResult: true, }, { name: "machine image identifier type changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.Image.Type = anywherev1.NutanixIdentifierUUID conf.Spec.Image.Name = utils.StringPtr("49ab2c64-72a1-4637-9673-e2f13b1463cb") return *conf }, expectedResult: true, }, { name: "machine memory size changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.MemorySize = resource.MustParse("4Gi") return *conf }, expectedResult: true, }, { name: "machine system disk size changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.SystemDiskSize = resource.MustParse("20Gi") return *conf }, expectedResult: true, }, { name: "machine VCPU sockets changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.VCPUSockets = 2 return *conf }, expectedResult: true, }, { name: "machine vcpus per socket changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.VCPUsPerSocket = 2 return *conf }, expectedResult: true, }, { name: "machine cluster changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.Cluster.Name = utils.StringPtr("new-cluster") return *conf }, expectedResult: true, }, { name: "machine subnet changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.Subnet.Name = utils.StringPtr("new-subnet") return *conf }, expectedResult: true, }, { name: "machine OS Family changed", newMachineConfig: func(spec anywherev1.NutanixMachineConfig) anywherev1.NutanixMachineConfig { conf := spec.DeepCopy() conf.Spec.OSFamily = "new-os-family" return *conf }, expectedResult: true, }, } for _, tt := range tests { oldMachineConf := &anywherev1.NutanixMachineConfig{} err := yaml.Unmarshal([]byte(nutanixMachineConfigSpec), oldMachineConf) require.NoError(t, err) newMachineConf := tt.newMachineConfig(*oldMachineConf) assert.Equal(t, tt.expectedResult, AnyImmutableFieldChanged(oldMachineConf, &newMachineConf)) } } func TestNutanixProviderGenerateStorageClass(t *testing.T) { provider := testDefaultNutanixProvider(t) sc := provider.GenerateStorageClass() assert.Nil(t, sc) } func TestNutanixProviderGenerateMHC(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") mhc, err := provider.GenerateMHC(clusterSpec) assert.NoError(t, err) 
assert.NotNil(t, mhc) } func TestNutanixProviderUpdateKubeconfig(t *testing.T) { provider := testDefaultNutanixProvider(t) err := provider.UpdateKubeConfig(nil, "test") assert.NoError(t, err) } func TestNutanixProviderVersion(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") v := provider.Version(clusterSpec) assert.NotNil(t, v) } func TestNutanixProviderEnvMap(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") t.Run("required envs not set", func(t *testing.T) { os.Clearenv() envMap, err := provider.EnvMap(clusterSpec) assert.Error(t, err) assert.Nil(t, envMap) }) t.Run("required envs set", func(t *testing.T) { t.Setenv(constants.NutanixUsernameKey, "nutanix") t.Setenv(constants.NutanixPasswordKey, "nutanix") t.Setenv(nutanixEndpointKey, "prism.nutanix.com") envMap, err := provider.EnvMap(clusterSpec) assert.NoError(t, err) assert.NotNil(t, envMap) }) } func TestNutanixProviderGetDeployments(t *testing.T) { provider := testDefaultNutanixProvider(t) deps := provider.GetDeployments() assert.NotNil(t, deps) assert.Contains(t, deps, "capx-system") } func TestNutanixProviderGetInfrastructureBundle(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") bundle := provider.GetInfrastructureBundle(clusterSpec) assert.NotNil(t, bundle) } func TestNutanixProviderDatacenterConfig(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") dc := provider.DatacenterConfig(clusterSpec) assert.Equal(t, provider.datacenterConfig, dc) } func TestNutanixProviderMachineConfigs(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") confs := provider.MachineConfigs(clusterSpec) require.NotEmpty(t, confs) assert.Len(t, confs, 1) } func TestNutanixProviderValidateNewSpec(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") err := provider.ValidateNewSpec(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec) assert.NoError(t, err) } func TestNutanixProviderChangeDiff(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cd := provider.ChangeDiff(clusterSpec, clusterSpec) assert.Nil(t, cd) } func TestNutanixProviderChangeDiffWithChange(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") newClusterSpec := clusterSpec.DeepCopy() clusterSpec.VersionsBundle.Nutanix.Version = "v0.5.2" newClusterSpec.VersionsBundle.Nutanix.Version = "v1.0.0" want := &types.ComponentChangeDiff{ ComponentName: "nutanix", NewVersion: "v1.0.0", OldVersion: "v0.5.2", } cd := provider.ChangeDiff(clusterSpec, newClusterSpec) assert.Equal(t, cd, want) } func TestNutanixProviderRunPostControlPlaneUpgrade(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cluster := &types.Cluster{Name: "eksa-unit-test"} err := provider.RunPostControlPlaneUpgrade(context.Background(), clusterSpec, clusterSpec, cluster, cluster) assert.NoError(t, err) } func TestNutanixProviderUpgradeNeeded(t *testing.T) { ctrl := gomock.NewController(t) executable := 
mockexecutables.NewMockExecutable(ctrl) executable.EXPECT().Execute(gomock.Any(), "get", []string{"--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test"}, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(*bytes.NewBufferString(nutanixDatacenterConfigSpecJSON), nil) executable.EXPECT().Execute(gomock.Any(), "get", []string{"--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixmachineconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test"}, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(*bytes.NewBufferString(nutanixMachineConfigSpecJSON), nil) kubectl := executables.NewKubectl(executable) mockClient := mocknutanix.NewMockClient(ctrl) mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} mockWriter := filewritermocks.NewMockFileWriter(ctrl) provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cluster := &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} upgrade, err := provider.UpgradeNeeded(context.Background(), clusterSpec, clusterSpec, cluster) assert.NoError(t, err) assert.False(t, upgrade) } func TestNutanixProviderRunPostControlPlaneCreation(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cluster := &types.Cluster{Name: "eksa-unit-test"} err := provider.RunPostControlPlaneCreation(context.Background(), clusterSpec, cluster) assert.NoError(t, err) } func TestNutanixProviderMachineDeploymentsToDelete(t *testing.T) { provider := testDefaultNutanixProvider(t) clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cluster := &types.Cluster{Name: "eksa-unit-test"} deps := provider.MachineDeploymentsToDelete(cluster, clusterSpec, clusterSpec) assert.NotNil(t, deps) assert.Len(t, deps, 0) } func TestNutanixProviderInstallCustomProviderComponents(t *testing.T) { provider := testDefaultNutanixProvider(t) kubeConfigFile := "test" err := provider.InstallCustomProviderComponents(context.Background(), kubeConfigFile) assert.NoError(t, err) } func TestNutanixProviderPreCAPIInstallOnBootstrap(t *testing.T) { provider := testDefaultNutanixProvider(t) cluster := &types.Cluster{Name: "eksa-unit-test"} clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") err := provider.PreCAPIInstallOnBootstrap(context.Background(), cluster, clusterSpec) assert.NoError(t, err) } func TestNutanixProviderPostMoveManagementToBootstrap(t *testing.T) { provider := testDefaultNutanixProvider(t) cluster := &types.Cluster{Name: "eksa-unit-test"} err := provider.PostMoveManagementToBootstrap(context.Background(), cluster) assert.NoError(t, err) }
1012
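The provider tests above all share one scaffold: a kubectl built over a gomock Executable, plus mock Nutanix client, TLS validator, HTTP round tripper, and file writer, wired into the provider through the package's test helpers. A condensed, hedged sketch of that scaffold follows; it reuses only the helpers and calls that appear in this file (testNutanixProvider, test.NewFullClusterSpec, the mock constructors), and the test name itself is illustrative, not from the repo.
// Condensed sketch of the mock scaffold used throughout this file. It assumes
// the same package, imports, and helpers (testNutanixProvider) that the
// surrounding tests rely on; only the test name is new.
func TestNutanixProviderScaffoldSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	executable := mockexecutables.NewMockExecutable(ctrl)
	// Fail the first kubectl apply to exercise the wrapped-error path.
	executable.EXPECT().
		ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).
		Return(bytes.Buffer{}, errors.New("test error"))
	kubectl := executables.NewKubectl(executable)

	mockClient := mocknutanix.NewMockClient(ctrl)
	mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl)
	mockTransport := mocknutanix.NewMockRoundTripper(ctrl)
	mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes()
	mockHTTPClient := &http.Client{Transport: mockTransport}
	mockWriter := filewritermocks.NewMockFileWriter(ctrl)

	provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter)
	cluster := &types.Cluster{Name: "eksa-unit-test"}
	clusterSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml")

	// Spec generation funnels through the credential update first, so the
	// injected executable error surfaces with a stable prefix.
	_, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec)
	assert.ErrorContains(t, err, "updating Nutanix credentials")
}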
eks-anywhere
aws
Go
package nutanix import ( "encoding/base64" "encoding/json" "fmt" "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/crypto" "github.com/aws/eks-anywhere/pkg/providers" "github.com/aws/eks-anywhere/pkg/registrymirror" "github.com/aws/eks-anywhere/pkg/registrymirror/containerd" "github.com/aws/eks-anywhere/pkg/templater" "github.com/aws/eks-anywhere/pkg/types" ) var jsonMarshal = json.Marshal // TemplateBuilder builds templates for nutanix. type TemplateBuilder struct { datacenterSpec *v1alpha1.NutanixDatacenterConfigSpec controlPlaneMachineSpec *v1alpha1.NutanixMachineConfigSpec etcdMachineSpec *v1alpha1.NutanixMachineConfigSpec workerNodeGroupMachineSpecs map[string]v1alpha1.NutanixMachineConfigSpec creds credentials.BasicAuthCredential now types.NowFunc } var _ providers.TemplateBuilder = &TemplateBuilder{} func NewNutanixTemplateBuilder( datacenterSpec *v1alpha1.NutanixDatacenterConfigSpec, controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.NutanixMachineConfigSpec, workerNodeGroupMachineSpecs map[string]v1alpha1.NutanixMachineConfigSpec, creds credentials.BasicAuthCredential, now types.NowFunc, ) *TemplateBuilder { return &TemplateBuilder{ datacenterSpec: datacenterSpec, controlPlaneMachineSpec: controlPlaneMachineSpec, etcdMachineSpec: etcdMachineSpec, workerNodeGroupMachineSpecs: workerNodeGroupMachineSpecs, creds: creds, now: now, } } func (ntb *TemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) { var etcdMachineSpec v1alpha1.NutanixMachineConfigSpec if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { etcdMachineSpec = *ntb.etcdMachineSpec } values, err := buildTemplateMapCP(ntb.datacenterSpec, clusterSpec, *ntb.controlPlaneMachineSpec, etcdMachineSpec) if err != nil { return nil, err } for _, buildOption := range buildOptions { buildOption(values) } bytes, err := templater.Execute(defaultCAPIConfigCP, values) if err != nil { return nil, err } return bytes, nil } func (ntb *TemplateBuilder) GenerateCAPISpecWorkers(clusterSpec *cluster.Spec, workloadTemplateNames, kubeadmconfigTemplateNames map[string]string) (content []byte, err error) { workerSpecs := make([][]byte, 0, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)) for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { values, err := buildTemplateMapMD(clusterSpec, ntb.workerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name], workerNodeGroupConfiguration) if err != nil { return nil, err } values["workloadTemplateName"] = workloadTemplateNames[workerNodeGroupConfiguration.Name] values["workloadkubeadmconfigTemplateName"] = kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] values["autoscalingConfig"] = workerNodeGroupConfiguration.AutoScalingConfiguration bytes, err := templater.Execute(defaultClusterConfigMD, values) if err != nil { return nil, err } workerSpecs = append(workerSpecs, bytes) } return templater.AppendYamlResources(workerSpecs...), nil } // GenerateCAPISpecSecret generates the secret containing the credentials for the nutanix prism central and is used by the // CAPX controller. The secret is named after the cluster name. 
func (ntb *TemplateBuilder) GenerateCAPISpecSecret(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) { return ntb.generateSpecSecret(CAPXSecretName(clusterSpec), ntb.creds, buildOptions...) } // CAPXSecretName returns the name of the secret containing the credentials for the nutanix prism central and is used by the // CAPX controller. func CAPXSecretName(spec *cluster.Spec) string { return fmt.Sprintf("capx-%s", spec.Cluster.Name) } // GenerateEKSASpecSecret generates the secret containing the credentials for the nutanix prism central and is used by the // EKS-A controller. The secret is named nutanix-credentials. func (ntb *TemplateBuilder) GenerateEKSASpecSecret(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) { return ntb.generateSpecSecret(EKSASecretName(clusterSpec), ntb.creds, buildOptions...) } // EKSASecretName returns the name of the secret containing the credentials for the nutanix prism central and is used by the // EKS-Anywhere controller. func EKSASecretName(spec *cluster.Spec) string { if spec.NutanixDatacenter.Spec.CredentialRef != nil { return spec.NutanixDatacenter.Spec.CredentialRef.Name } return constants.NutanixCredentialsName } func (ntb *TemplateBuilder) generateSpecSecret(secretName string, creds credentials.BasicAuthCredential, buildOptions ...providers.BuildMapOption) ([]byte, error) { values, err := buildTemplateMapSecret(secretName, creds) if err != nil { return nil, err } for _, buildOption := range buildOptions { buildOption(values) } bytes, err := templater.Execute(secretTemplate, values) if err != nil { return nil, err } return bytes, nil } func machineDeploymentName(clusterName, nodeGroupName string) string { return fmt.Sprintf("%s-%s", clusterName, nodeGroupName) } func buildTemplateMapCP( datacenterSpec *v1alpha1.NutanixDatacenterConfigSpec, clusterSpec *cluster.Spec, controlPlaneMachineSpec v1alpha1.NutanixMachineConfigSpec, etcdMachineSpec v1alpha1.NutanixMachineConfigSpec, ) (map[string]interface{}, error) { bundle := clusterSpec.VersionsBundle format := "cloud-config" apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig). Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)). Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig)) kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). 
Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)) values := map[string]interface{}{ "apiServerExtraArgs": apiServerExtraArgs.ToPartialYaml(), "clusterName": clusterSpec.Cluster.Name, "controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, "controlPlaneReplicas": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count, "controlPlaneSshAuthorizedKey": controlPlaneMachineSpec.Users[0].SshAuthorizedKeys[0], "controlPlaneSshUsername": controlPlaneMachineSpec.Users[0].Name, "controlPlaneTaints": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints, "eksaSystemNamespace": constants.EksaSystemNamespace, "format": format, "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, "serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks, "kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag, "kubernetesRepository": bundle.KubeDistro.Kubernetes.Repository, "corednsRepository": bundle.KubeDistro.CoreDNS.Repository, "corednsVersion": bundle.KubeDistro.CoreDNS.Tag, "etcdRepository": bundle.KubeDistro.Etcd.Repository, "etcdImageTag": bundle.KubeDistro.Etcd.Tag, "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "kubeVipImage": bundle.Nutanix.KubeVip.VersionedImage(), "kubeVipSvcEnable": false, "kubeVipLBEnable": false, "externalEtcdVersion": bundle.KubeDistro.EtcdVersion, "etcdCipherSuites": crypto.SecureCipherSuitesString(), "nutanixEndpoint": datacenterSpec.Endpoint, "nutanixPort": datacenterSpec.Port, "nutanixAdditionalTrustBundle": datacenterSpec.AdditionalTrustBundle, "nutanixInsecure": datacenterSpec.Insecure, "vcpusPerSocket": controlPlaneMachineSpec.VCPUsPerSocket, "vcpuSockets": controlPlaneMachineSpec.VCPUSockets, "memorySize": controlPlaneMachineSpec.MemorySize.String(), "systemDiskSize": controlPlaneMachineSpec.SystemDiskSize.String(), "imageIDType": controlPlaneMachineSpec.Image.Type, "imageName": controlPlaneMachineSpec.Image.Name, "imageUUID": controlPlaneMachineSpec.Image.UUID, "nutanixPEClusterIDType": controlPlaneMachineSpec.Cluster.Type, "nutanixPEClusterName": controlPlaneMachineSpec.Cluster.Name, "nutanixPEClusterUUID": controlPlaneMachineSpec.Cluster.UUID, "secretName": CAPXSecretName(clusterSpec), "subnetIDType": controlPlaneMachineSpec.Subnet.Type, "subnetName": controlPlaneMachineSpec.Subnet.Name, "subnetUUID": controlPlaneMachineSpec.Subnet.UUID, } if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil { registryMirror := registrymirror.FromCluster(clusterSpec.Cluster) values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap) values["mirrorBase"] = registryMirror.BaseRegistry values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) values["insecureSkip"] = registryMirror.InsecureSkipVerify if len(registryMirror.CACertContent) > 0 { values["registryCACert"] = registryMirror.CACertContent } if registryMirror.Auth { values["registryAuth"] = registryMirror.Auth username, password, err := config.ReadCredentials() if err != nil { return values, err } values["registryUsername"] = username values["registryPassword"] = password } } if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { values["externalEtcd"] = true values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name } if controlPlaneMachineSpec.Project != nil { values["projectIDType"] = controlPlaneMachineSpec.Project.Type 
values["projectName"] = controlPlaneMachineSpec.Project.Name values["projectUUID"] = controlPlaneMachineSpec.Project.UUID } if clusterSpec.AWSIamConfig != nil { values["awsIamAuth"] = true } if clusterSpec.Cluster.Spec.ProxyConfiguration != nil { values["proxyConfig"] = true values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy values["noProxy"] = generateNoProxyList(clusterSpec) } if len(controlPlaneMachineSpec.AdditionalCategories) > 0 { values["additionalCategories"] = controlPlaneMachineSpec.AdditionalCategories } return values, nil } func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1alpha1.NutanixMachineConfigSpec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration) (map[string]interface{}, error) { bundle := clusterSpec.VersionsBundle format := "cloud-config" kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)) values := map[string]interface{}{ "clusterName": clusterSpec.Cluster.Name, "eksaSystemNamespace": constants.EksaSystemNamespace, "format": format, "kubernetesVersion": bundle.KubeDistro.Kubernetes.Tag, "workerReplicas": *workerNodeGroupConfiguration.Count, "workerPoolName": "md-0", "workerSshAuthorizedKey": workerNodeGroupMachineSpec.Users[0].SshAuthorizedKeys[0], "workerSshUsername": workerNodeGroupMachineSpec.Users[0].Name, "vcpusPerSocket": workerNodeGroupMachineSpec.VCPUsPerSocket, "vcpuSockets": workerNodeGroupMachineSpec.VCPUSockets, "memorySize": workerNodeGroupMachineSpec.MemorySize.String(), "systemDiskSize": workerNodeGroupMachineSpec.SystemDiskSize.String(), "imageIDType": workerNodeGroupMachineSpec.Image.Type, "imageName": workerNodeGroupMachineSpec.Image.Name, "imageUUID": workerNodeGroupMachineSpec.Image.UUID, "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "nutanixPEClusterIDType": workerNodeGroupMachineSpec.Cluster.Type, "nutanixPEClusterName": workerNodeGroupMachineSpec.Cluster.Name, "nutanixPEClusterUUID": workerNodeGroupMachineSpec.Cluster.UUID, "subnetIDType": workerNodeGroupMachineSpec.Subnet.Type, "subnetName": workerNodeGroupMachineSpec.Subnet.Name, "subnetUUID": workerNodeGroupMachineSpec.Subnet.UUID, "workerNodeGroupName": fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name), "workerNodeGroupTaints": workerNodeGroupConfiguration.Taints, } if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil { registryMirror := registrymirror.FromCluster(clusterSpec.Cluster) values["registryMirrorMap"] = containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap) values["mirrorBase"] = registryMirror.BaseRegistry values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) values["insecureSkip"] = registryMirror.InsecureSkipVerify if len(registryMirror.CACertContent) > 0 { values["registryCACert"] = registryMirror.CACertContent } if registryMirror.Auth { values["registryAuth"] = registryMirror.Auth username, password, err := config.ReadCredentials() if err != nil { return values, err } values["registryUsername"] = username values["registryPassword"] = password } } if workerNodeGroupMachineSpec.Project != nil { values["projectIDType"] = workerNodeGroupMachineSpec.Project.Type values["projectName"] = workerNodeGroupMachineSpec.Project.Name values["projectUUID"] = 
workerNodeGroupMachineSpec.Project.UUID } if clusterSpec.Cluster.Spec.ProxyConfiguration != nil { values["proxyConfig"] = true values["httpProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpProxy values["httpsProxy"] = clusterSpec.Cluster.Spec.ProxyConfiguration.HttpsProxy values["noProxy"] = generateNoProxyList(clusterSpec) } if len(workerNodeGroupMachineSpec.AdditionalCategories) > 0 { values["additionalCategories"] = workerNodeGroupMachineSpec.AdditionalCategories } return values, nil } func buildTemplateMapSecret(secretName string, creds credentials.BasicAuthCredential) (map[string]interface{}, error) { encodedCreds, err := jsonMarshal(creds) if err != nil { return nil, err } nutanixCreds := []credentials.Credential{{ Type: credentials.BasicAuthCredentialType, Data: encodedCreds, }} credsJSON, err := jsonMarshal(nutanixCreds) if err != nil { return nil, err } values := map[string]interface{}{ "secretName": secretName, "eksaSystemNamespace": constants.EksaSystemNamespace, "base64EncodedCredentials": base64.StdEncoding.EncodeToString(credsJSON), } return values, nil } func generateNoProxyList(clusterSpec *cluster.Spec) []string { capacity := len(clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks) + len(clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks) + len(clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy) + 4 noProxyList := make([]string, 0, capacity) noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks...) noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks...) noProxyList = append(noProxyList, clusterSpec.Cluster.Spec.ProxyConfiguration.NoProxy...) // Add no-proxy defaults noProxyList = append(noProxyList, clusterapi.NoProxyDefaults()...) noProxyList = append(noProxyList, clusterSpec.Config.NutanixDatacenter.Spec.Endpoint, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, ) return noProxyList }
375
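The least obvious part of the template builder above is the secret payload: buildTemplateMapSecret JSON-encodes the BasicAuthCredential, wraps it in a one-element credentials.Credential list, JSON-encodes that list again, and finally base64-encodes the result. A minimal in-package sketch of just that pipeline; the function name and the literal username/password are illustrative, while GetCredsFromEnv and the constants keys are the same ones the builder tests use.
// Minimal sketch of the credential payload fed to the secret template:
// marshal the credentials, wrap them in a credentials.Credential list,
// marshal again, then base64-encode (mirrors buildTemplateMapSecret).
func sketchSecretPayload() (string, error) {
	os.Setenv(constants.EksaNutanixUsernameKey, "admin")    // illustrative value
	os.Setenv(constants.EksaNutanixPasswordKey, "password") // illustrative value
	creds := GetCredsFromEnv()

	encodedCreds, err := json.Marshal(creds)
	if err != nil {
		return "", err
	}
	wrapped, err := json.Marshal([]credentials.Credential{{
		Type: credentials.BasicAuthCredentialType,
		Data: encodedCreds,
	}})
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(wrapped), nil
}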
eks-anywhere
aws
Go
package nutanix import ( _ "embed" "errors" "os" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/internal/test" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/constants" ) //go:embed testdata/eksa-cluster.yaml var nutanixClusterConfigSpec string //go:embed testdata/datacenterConfig.yaml var nutanixDatacenterConfigSpec string //go:embed testdata/machineConfig.yaml var nutanixMachineConfigSpec string //go:embed testdata/machineConfig_project.yaml var nutanixMachineConfigSpecWithProject string //go:embed testdata/machineConfig_additional_categories.yaml var nutanixMachineConfigSpecWithAdditionalCategories string func fakemarshal(v interface{}) ([]byte, error) { return []byte{}, errors.New("marshalling failed") } func restoremarshal(replace func(v interface{}) ([]byte, error)) { jsonMarshal = replace } func TestNewNutanixTemplateBuilder(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) secretSpec, err := builder.GenerateCAPISpecSecret(buildSpec) assert.NoError(t, err) assert.NotNil(t, secretSpec) expectedSecret, err := os.ReadFile("testdata/templated_secret.yaml") require.NoError(t, err) assert.Equal(t, expectedSecret, secretSpec) secretSpec, err = builder.GenerateEKSASpecSecret(buildSpec) assert.NoError(t, err) assert.NotNil(t, secretSpec) expectedSecret, err = os.ReadFile("testdata/templated_secret_eksa.yaml") require.NoError(t, err) assert.Equal(t, expectedSecret, secretSpec) } func TestNewNutanixTemplateBuilderGenerateSpecSecretFailure(t *testing.T) { storedMarshal := jsonMarshal jsonMarshal = fakemarshal defer restoremarshal(storedMarshal) t.Setenv(constants.NutanixUsernameKey, "admin") t.Setenv(constants.NutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(nil, nil, nil, nil, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") secretSpec, err := builder.GenerateCAPISpecSecret(buildSpec) assert.Nil(t, secretSpec) assert.Error(t, err) secretSpec, err = builder.GenerateEKSASpecSecret(buildSpec) assert.Nil(t, secretSpec) assert.Error(t, err) } func TestNewNutanixTemplateBuilderGenerateSpecSecretDefaultCreds(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.NutanixUsernameKey, "admin") t.Setenv(constants.NutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-no-credentialref.yaml") secretSpec, 
err := builder.GenerateCAPISpecSecret(buildSpec) assert.NoError(t, err) assert.NotNil(t, secretSpec) secretSpec, err = builder.GenerateEKSASpecSecret(buildSpec) assert.NoError(t, err) assert.NotNil(t, secretSpec) } func TestNutanixTemplateBuilderGenerateCAPISpecForCreateWithAutoscalingConfiguration(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.NutanixUsernameKey, "admin") t.Setenv(constants.NutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-autoscaler.yaml") workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) expectedWorkerSpec, err := os.ReadFile("testdata/expected_results_autoscaling_md.yaml") require.NoError(t, err) assert.Equal(t, string(workerSpec), string(expectedWorkerSpec)) } func TestNewNutanixTemplateBuilderOIDCConfig(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-oidc.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_oidc.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) } func TestNewNutanixTemplateBuilderRegistryMirrorConfig(t *testing.T) { t.Setenv(constants.RegistryUsername, "username") t.Setenv(constants.RegistryPassword, "password") dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-registry-mirror.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_registry_mirror.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) expectedWorkersSpec, err := os.ReadFile("testdata/expected_results_registry_mirror_md.yaml") require.NoError(t, err) assert.Equal(t, expectedWorkersSpec, workerSpec) } func TestNewNutanixTemplateBuilderRegistryMirrorConfigNoRegistryCredsSet(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") 
t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-registry-mirror.yaml") _, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.Error(t, err) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } _, err = builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.Error(t, err) } func TestNewNutanixTemplateBuilderProject(t *testing.T) { t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() dcConf, _, _ := minimalNutanixConfigSpec(t) machineConf := &anywherev1.NutanixMachineConfig{} err := yaml.Unmarshal([]byte(nutanixMachineConfigSpecWithProject), machineConf) require.NoError(t, err) workerConfs := map[string]anywherev1.NutanixMachineConfigSpec{ "eksa-unit-test": machineConf.Spec, } builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-project.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_project.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) expectedWorkersSpec, err := os.ReadFile("testdata/expected_results_project_md.yaml") require.NoError(t, err) assert.Equal(t, expectedWorkersSpec, workerSpec) } func TestNewNutanixTemplateBuilderAdditionalCategories(t *testing.T) { t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() dcConf, _, _ := minimalNutanixConfigSpec(t) machineConf := &anywherev1.NutanixMachineConfig{} err := yaml.Unmarshal([]byte(nutanixMachineConfigSpecWithAdditionalCategories), machineConf) require.NoError(t, err) workerConfs := map[string]anywherev1.NutanixMachineConfigSpec{ "eksa-unit-test": machineConf.Spec, } builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-additional-categories.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_additional_categories.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) 
expectedWorkersSpec, err := os.ReadFile("testdata/expected_results_additional_categories_md.yaml") require.NoError(t, err) assert.Equal(t, expectedWorkersSpec, workerSpec) } func TestNewNutanixTemplateBuilderNodeTaintsAndLabels(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-node-taints-labels.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_node_taints_labels.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) expectedWorkersSpec, err := os.ReadFile("testdata/expected_results_node_taints_labels_md.yaml") require.NoError(t, err) assert.Equal(t, expectedWorkersSpec, workerSpec) } func TestNewNutanixTemplateBuilderIAMAuth(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-iamauth.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_iamauth.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) } func TestNewNutanixTemplateBuilderIRSA(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-irsa.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_irsa.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) } func TestNewNutanixTemplateBuilderProxy(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-proxy.yaml") cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, cpSpec) 
expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_proxy.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } kubeadmconfigTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) expectedWorkersSpec, err := os.ReadFile("testdata/expected_results_proxy_md.yaml") require.NoError(t, err) assert.Equal(t, expectedWorkersSpec, workerSpec) } func minimalNutanixConfigSpec(t *testing.T) (*anywherev1.NutanixDatacenterConfig, *anywherev1.NutanixMachineConfig, map[string]anywherev1.NutanixMachineConfigSpec) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf) require.NoError(t, err) machineConf := &anywherev1.NutanixMachineConfig{} err = yaml.Unmarshal([]byte(nutanixMachineConfigSpec), machineConf) require.NoError(t, err) workerConfs := map[string]anywherev1.NutanixMachineConfigSpec{ "eksa-unit-test": machineConf.Spec, } return dcConf, machineConf, workerConfs }
431
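The marshal-failure tests above hinge on a small test seam: jsonMarshal is a package-level variable (var jsonMarshal = json.Marshal) that fakemarshal and restoremarshal swap in and out around the call under test. A generic, self-contained sketch of that technique, with hypothetical package and function names:
package seam

import (
	"encoding/json"
	"errors"
	"testing"
)

// marshal is a package-level seam so tests can inject failures, mirroring
// `var jsonMarshal = json.Marshal` in the nutanix package.
var marshal = json.Marshal

// EncodePayload is a stand-in function under test; it fails only when
// marshalling fails, which is exactly the branch the seam lets us reach.
func EncodePayload(v interface{}) ([]byte, error) {
	return marshal(v)
}

func TestEncodePayloadMarshalFailure(t *testing.T) {
	stored := marshal
	defer func() { marshal = stored }() // always restore the real implementation
	marshal = func(v interface{}) ([]byte, error) {
		return nil, errors.New("marshalling failed")
	}
	if _, err := EncodePayload(struct{}{}); err == nil {
		t.Fatal("expected injected marshal error")
	}
}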
eks-anywhere
aws
Go
package nutanix import ( "context" "fmt" "net/http" "strconv" "strings" "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" "github.com/nutanix-cloud-native/prism-go-client/utils" v3 "github.com/nutanix-cloud-native/prism-go-client/v3" "k8s.io/apimachinery/pkg/api/resource" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/crypto" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/networkutils" ) const ( minNutanixCPUSockets = 1 minNutanixCPUPerSocket = 1 minNutanixMemoryMiB = 2048 minNutanixDiskGiB = 20 ) // IPValidator is an interface that defines methods to validate the control plane IP. type IPValidator interface { ValidateControlPlaneIPUniqueness(cluster *anywherev1.Cluster) error } // Validator is a client to validate nutanix resources. type Validator struct { httpClient *http.Client certValidator crypto.TlsValidator clientCache *ClientCache } // NewValidator returns a new validator client. func NewValidator(clientCache *ClientCache, certValidator crypto.TlsValidator, httpClient *http.Client) *Validator { return &Validator{ clientCache: clientCache, certValidator: certValidator, httpClient: httpClient, } } // ValidateClusterSpec validates the cluster spec. func (v *Validator) ValidateClusterSpec(ctx context.Context, spec *cluster.Spec, creds credentials.BasicAuthCredential) error { logger.Info("ValidateClusterSpec for Nutanix datacenter", spec.NutanixDatacenter.Name) client, err := v.clientCache.GetNutanixClient(spec.NutanixDatacenter, creds) if err != nil { return err } if err := v.ValidateDatacenterConfig(ctx, client, spec.NutanixDatacenter); err != nil { return err } for _, conf := range spec.NutanixMachineConfigs { if err := v.ValidateMachineConfig(ctx, client, conf); err != nil { return fmt.Errorf("failed to validate machine config: %v", err) } } return nil } // ValidateDatacenterConfig validates the datacenter config. 
func (v *Validator) ValidateDatacenterConfig(ctx context.Context, client Client, config *anywherev1.NutanixDatacenterConfig) error { if config.Spec.Insecure { logger.Info("Warning: Skipping TLS validation for insecure connection to Nutanix Prism Central; this is not recommended for production use") } if err := v.validateEndpointAndPort(config.Spec); err != nil { return err } if err := v.validateCredentials(ctx, client); err != nil { return err } if err := v.validateTrustBundleConfig(config.Spec); err != nil { return err } if err := v.validateCredentialRef(config); err != nil { return err } return nil } func (v *Validator) validateCredentialRef(config *anywherev1.NutanixDatacenterConfig) error { if config.Spec.CredentialRef == nil { return fmt.Errorf("credentialRef must be provided") } if config.Spec.CredentialRef.Kind != constants.SecretKind { return fmt.Errorf("credentialRef kind must be %s", constants.SecretKind) } if config.Spec.CredentialRef.Name == "" { return fmt.Errorf("credentialRef name must be provided") } return nil } func (v *Validator) validateEndpointAndPort(dcConf anywherev1.NutanixDatacenterConfigSpec) error { if !networkutils.IsPortValid(strconv.Itoa(dcConf.Port)) { return fmt.Errorf("nutanix prism central port %d out of range", dcConf.Port) } if dcConf.Endpoint == "" { return fmt.Errorf("nutanix prism central endpoint must be provided") } server := fmt.Sprintf("%s:%d", dcConf.Endpoint, dcConf.Port) if !strings.HasPrefix(server, "https://") { server = fmt.Sprintf("https://%s", server) } if _, err := v.httpClient.Get(server); err != nil { return fmt.Errorf("failed to reach server %s: %v", server, err) } return nil } func (v *Validator) validateCredentials(ctx context.Context, client Client) error { _, err := client.GetCurrentLoggedInUser(ctx) if err != nil { return err } return nil } func (v *Validator) validateTrustBundleConfig(dcConf anywherev1.NutanixDatacenterConfigSpec) error { if dcConf.AdditionalTrustBundle == "" { return nil } return v.certValidator.ValidateCert(dcConf.Endpoint, fmt.Sprintf("%d", dcConf.Port), dcConf.AdditionalTrustBundle) } func (v *Validator) validateMachineSpecs(machineSpec anywherev1.NutanixMachineConfigSpec) error { if machineSpec.VCPUSockets < minNutanixCPUSockets { return fmt.Errorf("vCPU sockets %d must be greater than or equal to %d", machineSpec.VCPUSockets, minNutanixCPUSockets) } if machineSpec.VCPUsPerSocket < minNutanixCPUPerSocket { return fmt.Errorf("vCPUs per socket %d must be greater than or equal to %d", machineSpec.VCPUsPerSocket, minNutanixCPUPerSocket) } minNutanixMemory, err := resource.ParseQuantity(fmt.Sprintf("%dMi", minNutanixMemoryMiB)) if err != nil { return err } if machineSpec.MemorySize.Cmp(minNutanixMemory) < 0 { return fmt.Errorf("MemorySize must be greater than or equal to %dMi", minNutanixMemoryMiB) } minNutanixDisk, err := resource.ParseQuantity(fmt.Sprintf("%dGi", minNutanixDiskGiB)) if err != nil { return err } if machineSpec.SystemDiskSize.Cmp(minNutanixDisk) < 0 { return fmt.Errorf("SystemDiskSize must be greater than or equal to %dGi", minNutanixDiskGiB) } return nil } // ValidateMachineConfig validates the Prism Element cluster, subnet, and image for the machine. 
func (v *Validator) ValidateMachineConfig(ctx context.Context, client Client, config *anywherev1.NutanixMachineConfig) error {
	if err := v.validateMachineSpecs(config.Spec); err != nil {
		return err
	}

	if err := v.validateClusterConfig(ctx, client, config.Spec.Cluster); err != nil {
		return err
	}

	if err := v.validateSubnetConfig(ctx, client, config.Spec.Subnet); err != nil {
		return err
	}

	if err := v.validateImageConfig(ctx, client, config.Spec.Image); err != nil {
		return err
	}

	if config.Spec.Project != nil {
		if err := v.validateProjectConfig(ctx, client, *config.Spec.Project); err != nil {
			return err
		}
	}

	if config.Spec.AdditionalCategories != nil {
		if err := v.validateAdditionalCategories(ctx, client, config.Spec.AdditionalCategories); err != nil {
			return err
		}
	}

	return nil
}

func (v *Validator) validateClusterConfig(ctx context.Context, client Client, identifier anywherev1.NutanixResourceIdentifier) error {
	switch identifier.Type {
	case anywherev1.NutanixIdentifierName:
		if identifier.Name == nil || *identifier.Name == "" {
			return fmt.Errorf("missing cluster name")
		}
		clusterName := *identifier.Name
		if _, err := findClusterUUIDByName(ctx, client, clusterName); err != nil {
			return fmt.Errorf("failed to find cluster with name %q: %v", clusterName, err)
		}
	case anywherev1.NutanixIdentifierUUID:
		if identifier.UUID == nil || *identifier.UUID == "" {
			return fmt.Errorf("missing cluster uuid")
		}
		clusterUUID := *identifier.UUID
		if _, err := client.GetCluster(ctx, clusterUUID); err != nil {
			return fmt.Errorf("failed to find cluster with uuid %s: %v", clusterUUID, err)
		}
	default:
		return fmt.Errorf("invalid cluster identifier type: %s; valid types are: %q and %q", identifier.Type, anywherev1.NutanixIdentifierName, anywherev1.NutanixIdentifierUUID)
	}

	return nil
}

func (v *Validator) validateImageConfig(ctx context.Context, client Client, identifier anywherev1.NutanixResourceIdentifier) error {
	switch identifier.Type {
	case anywherev1.NutanixIdentifierName:
		if identifier.Name == nil || *identifier.Name == "" {
			return fmt.Errorf("missing image name")
		}
		imageName := *identifier.Name
		if _, err := findImageUUIDByName(ctx, client, imageName); err != nil {
			return fmt.Errorf("failed to find image with name %q: %v", imageName, err)
		}
	case anywherev1.NutanixIdentifierUUID:
		if identifier.UUID == nil || *identifier.UUID == "" {
			return fmt.Errorf("missing image uuid")
		}
		imageUUID := *identifier.UUID
		if _, err := client.GetImage(ctx, imageUUID); err != nil {
			return fmt.Errorf("failed to find image with uuid %s: %v", imageUUID, err)
		}
	default:
		return fmt.Errorf("invalid image identifier type: %s; valid types are: %q and %q", identifier.Type, anywherev1.NutanixIdentifierName, anywherev1.NutanixIdentifierUUID)
	}

	return nil
}

func (v *Validator) validateSubnetConfig(ctx context.Context, client Client, identifier anywherev1.NutanixResourceIdentifier) error {
	switch identifier.Type {
	case anywherev1.NutanixIdentifierName:
		if identifier.Name == nil || *identifier.Name == "" {
			return fmt.Errorf("missing subnet name")
		}
		subnetName := *identifier.Name
		if _, err := findSubnetUUIDByName(ctx, client, subnetName); err != nil {
			return fmt.Errorf("failed to find subnet with name %q: %v", subnetName, err)
		}
	case anywherev1.NutanixIdentifierUUID:
		if identifier.UUID == nil || *identifier.UUID == "" {
			return fmt.Errorf("missing subnet uuid")
		}
		subnetUUID := *identifier.UUID
		if _, err := client.GetSubnet(ctx, subnetUUID); err != nil {
			return fmt.Errorf("failed to find subnet with uuid %s: %v", subnetUUID, err)
		}
	default:
		return fmt.Errorf("invalid subnet identifier type: %s; valid types are: %q and %q", identifier.Type, anywherev1.NutanixIdentifierName, anywherev1.NutanixIdentifierUUID)
	}

	return nil
}

func (v *Validator) validateProjectConfig(ctx context.Context, client Client, identifier anywherev1.NutanixResourceIdentifier) error {
	switch identifier.Type {
	case anywherev1.NutanixIdentifierName:
		if identifier.Name == nil || *identifier.Name == "" {
			return fmt.Errorf("missing project name")
		}
		projectName := *identifier.Name
		if _, err := findProjectUUIDByName(ctx, client, projectName); err != nil {
			return fmt.Errorf("failed to find project with name %q: %v", projectName, err)
		}
	case anywherev1.NutanixIdentifierUUID:
		if identifier.UUID == nil || *identifier.UUID == "" {
			return fmt.Errorf("missing project uuid")
		}
		projectUUID := *identifier.UUID
		if _, err := client.GetProject(ctx, projectUUID); err != nil {
			return fmt.Errorf("failed to find project with uuid %s: %v", projectUUID, err)
		}
	default:
		return fmt.Errorf("invalid project identifier type: %s; valid types are: %q and %q", identifier.Type, anywherev1.NutanixIdentifierName, anywherev1.NutanixIdentifierUUID)
	}

	return nil
}

func (v *Validator) validateAdditionalCategories(ctx context.Context, client Client, categories []anywherev1.NutanixCategoryIdentifier) error {
	for _, category := range categories {
		if category.Key == "" {
			return fmt.Errorf("missing category key")
		}

		if category.Value == "" {
			return fmt.Errorf("missing category value")
		}

		if _, err := client.GetCategoryKey(ctx, category.Key); err != nil {
			return fmt.Errorf("failed to find category with key %q: %v", category.Key, err)
		}

		if _, err := client.GetCategoryValue(ctx, category.Key, category.Value); err != nil {
			return fmt.Errorf("failed to find category value %q for category %q: %v", category.Value, category.Key, err)
		}
	}

	return nil
}

// findSubnetUUIDByName retrieves the subnet uuid by the given subnet name.
func findSubnetUUIDByName(ctx context.Context, v3Client Client, subnetName string) (*string, error) {
	res, err := v3Client.ListSubnet(ctx, &v3.DSMetadata{
		Filter: utils.StringPtr(fmt.Sprintf("name==%s", subnetName)),
	})
	if err != nil || len(res.Entities) == 0 {
		return nil, fmt.Errorf("failed to find subnet by name %q: %v", subnetName, err)
	}

	if len(res.Entities) > 1 {
		return nil, fmt.Errorf("found more than one (%v) subnet with name %q", len(res.Entities), subnetName)
	}

	return res.Entities[0].Metadata.UUID, nil
}

// findClusterUUIDByName retrieves the cluster uuid by the given cluster name.
func findClusterUUIDByName(ctx context.Context, v3Client Client, clusterName string) (*string, error) {
	res, err := v3Client.ListCluster(ctx, &v3.DSMetadata{
		Filter: utils.StringPtr(fmt.Sprintf("name==%s", clusterName)),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to find cluster by name %q: %v", clusterName, err)
	}

	entities := make([]*v3.ClusterIntentResponse, 0)
	for _, entity := range res.Entities {
		if entity.Status != nil && entity.Status.Resources != nil && entity.Status.Resources.Config != nil {
			serviceList := entity.Status.Resources.Config.ServiceList
			isPrismCentral := false
			for _, svc := range serviceList {
				// Prism Central is also internally a cluster, but we filter it out here
				// because we only care about Prism Element clusters.
				if svc != nil && strings.ToUpper(*svc) == "PRISM_CENTRAL" {
					isPrismCentral = true
				}
			}
			if !isPrismCentral && *entity.Spec.Name == clusterName {
				entities = append(entities, entity)
			}
		}
	}

	if len(entities) == 0 {
		return nil, fmt.Errorf("failed to find cluster by name %q", clusterName)
	}

	if len(entities) > 1 {
		return nil, fmt.Errorf("found more than one (%v) cluster with name %q", len(entities), clusterName)
	}

	return entities[0].Metadata.UUID, nil
}

// findImageUUIDByName retrieves the image uuid by the given image name.
func findImageUUIDByName(ctx context.Context, v3Client Client, imageName string) (*string, error) {
	res, err := v3Client.ListImage(ctx, &v3.DSMetadata{
		Filter: utils.StringPtr(fmt.Sprintf("name==%s", imageName)),
	})
	if err != nil || len(res.Entities) == 0 {
		return nil, fmt.Errorf("failed to find image by name %q: %v", imageName, err)
	}

	if len(res.Entities) > 1 {
		return nil, fmt.Errorf("found more than one (%v) image with name %q", len(res.Entities), imageName)
	}

	return res.Entities[0].Metadata.UUID, nil
}

// findProjectUUIDByName retrieves the project uuid by the given project name.
func findProjectUUIDByName(ctx context.Context, v3Client Client, projectName string) (*string, error) {
	res, err := v3Client.ListProject(ctx, &v3.DSMetadata{
		Filter: utils.StringPtr(fmt.Sprintf("name==%s", projectName)),
	})
	if err != nil || len(res.Entities) == 0 {
		return nil, fmt.Errorf("failed to find project by name %q: %v", projectName, err)
	}

	if len(res.Entities) > 1 {
		return nil, fmt.Errorf("found more than one (%v) project with name %q", len(res.Entities), projectName)
	}

	return res.Entities[0].Metadata.UUID, nil
}

func (v *Validator) validateUpgradeRolloutStrategy(clusterSpec *cluster.Spec) error {
	if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
		return fmt.Errorf("upgrade rollout strategy customization is not supported for nutanix provider")
	}
	for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
		if workerNodeGroupConfiguration.UpgradeRolloutStrategy != nil {
			return fmt.Errorf("upgrade rollout strategy customization is not supported for nutanix provider")
		}
	}
	return nil
}
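// The following is a minimal, illustrative sketch (not part of the provider) of how the
// name-based identifier validation above is exercised from within this package. The
// Validator and Client values are assumed to come from the caller, and the cluster name
// is hypothetical.
func exampleValidateClusterByName(ctx context.Context, v *Validator, client Client) error {
	id := anywherev1.NutanixResourceIdentifier{
		Type: anywherev1.NutanixIdentifierName,
		Name: utils.StringPtr("prism-cluster"), // hypothetical Prism Element cluster name
	}
	// Resolves the name to a UUID via ListCluster, filtering out Prism Central and
	// failing on zero or duplicate matches, exactly as findClusterUUIDByName does.
	return v.validateClusterConfig(ctx, client, id)
}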
435
eks-anywhere
aws
Go
package nutanix import ( "context" _ "embed" "encoding/json" "errors" "net/http" "testing" "github.com/golang/mock/gomock" "github.com/nutanix-cloud-native/prism-go-client/utils" v3 "github.com/nutanix-cloud-native/prism-go-client/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/yaml" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" mockCrypto "github.com/aws/eks-anywhere/pkg/crypto/mocks" mocknutanix "github.com/aws/eks-anywhere/pkg/providers/nutanix/mocks" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) //go:embed testdata/datacenterConfig_with_trust_bundle.yaml var nutanixDatacenterConfigSpecWithTrustBundle string //go:embed testdata/datacenterConfig_with_invalid_port.yaml var nutanixDatacenterConfigSpecWithInvalidPort string //go:embed testdata/datacenterConfig_with_invalid_endpoint.yaml var nutanixDatacenterConfigSpecWithInvalidEndpoint string //go:embed testdata/datacenterConfig_with_insecure.yaml var nutanixDatacenterConfigSpecWithInsecure string //go:embed testdata/datacenterConfig_no_credentialRef.yaml var nutanixDatacenterConfigSpecWithNoCredentialRef string //go:embed testdata/datacenterConfig_invalid_credentialRef_kind.yaml var nutanixDatacenterConfigSpecWithInvalidCredentialRefKind string //go:embed testdata/datacenterConfig_empty_credentialRef_name.yaml var nutanixDatacenterConfigSpecWithEmptyCredentialRefName string func fakeClusterList() *v3.ClusterListIntentResponse { return &v3.ClusterListIntentResponse{ Entities: []*v3.ClusterIntentResponse{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cdb"), }, Spec: &v3.Cluster{ Name: utils.StringPtr("prism-cluster"), }, Status: &v3.ClusterDefStatus{ Resources: &v3.ClusterObj{ Config: &v3.ClusterConfig{ ServiceList: []*string{utils.StringPtr("AOS")}, }, }, }, }, }, } } func fakeSubnetList() *v3.SubnetListIntentResponse { return &v3.SubnetListIntentResponse{ Entities: []*v3.SubnetIntentResponse{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("b15f6966-bfc7-4d1e-8575-224096fc1cdb"), }, Spec: &v3.Subnet{ Name: utils.StringPtr("prism-subnet"), }, }, }, } } func fakeImageList() *v3.ImageListIntentResponse { return &v3.ImageListIntentResponse{ Entities: []*v3.ImageIntentResponse{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("c15f6966-bfc7-4d1e-8575-224096fc1cdb"), }, Spec: &v3.Image{ Name: utils.StringPtr("prism-image"), }, }, }, } } func fakeProjectList() *v3.ProjectListResponse { return &v3.ProjectListResponse{ Entities: []*v3.Project{ { Metadata: &v3.Metadata{ UUID: utils.StringPtr("5c9a0641-1025-40ed-9e1d-0d0a23043e57"), }, Spec: &v3.ProjectSpec{ Name: "prism-image", }, }, }, } } func TestNutanixValidatorValidateMachineConfig(t *testing.T) { ctrl := gomock.NewController(t) tests := []struct { name string setup func(*anywherev1.NutanixMachineConfig, *mocknutanix.MockClient, *mockCrypto.MockTlsValidator, *mocknutanix.MockRoundTripper) *Validator expectedError string }{ { name: "invalid vcpu sockets", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.VCPUSockets = 0 clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "vCPU sockets 0 must be greater than or equal to 1", }, { name: "invalid vcpus per socket", setup: func(machineConf 
*anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.VCPUsPerSocket = 0 clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "vCPUs per socket 0 must be greater than or equal to 1", }, { name: "memory size less than min required", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.MemorySize = resource.MustParse("100Mi") clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "MemorySize must be greater than or equal to 2048Mi", }, { name: "invalid system size", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.SystemDiskSize = resource.MustParse("100Mi") clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "SystemDiskSize must be greater than or equal to 20Gi", }, { name: "empty cluster name", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.Cluster.Name = nil clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing cluster name", }, { name: "empty cluster uuid", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.Cluster.Type = anywherev1.NutanixIdentifierUUID machineConf.Spec.Cluster.UUID = nil clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing cluster uuid", }, { name: "invalid cluster identifier type", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { machineConf.Spec.Cluster.Type = "notanidentifier" clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "invalid cluster identifier type", }, { name: "list cluster request failed", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(nil, errors.New("cluster not found")) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find cluster by name", }, { name: "list cluster request did not find match", setup: 
func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(&v3.ClusterListIntentResponse{}, nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find cluster by name", }, { name: "duplicate clusters found", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { clusters := fakeClusterList() clusters.Entities = append(clusters.Entities, clusters.Entities[0]) mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(clusters, nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "found more than one (2) cluster with name", }, { name: "empty subnet name", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) machineConf.Spec.Subnet.Name = nil clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing subnet name", }, { name: "empty subnet uuid", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) machineConf.Spec.Subnet.Type = anywherev1.NutanixIdentifierUUID machineConf.Spec.Subnet.UUID = nil clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing subnet uuid", }, { name: "invalid subnet identifier type", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) machineConf.Spec.Subnet.Type = "notanidentifier" clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "invalid subnet identifier type", }, { name: "list subnet request failed", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(nil, errors.New("subnet not found")) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find subnet by name", }, { name: "list subnet request did not find match", setup: 
func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(&v3.SubnetListIntentResponse{}, nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find subnet by name", }, { name: "duplicate subnets found", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) subnets := fakeSubnetList() subnets.Entities = append(subnets.Entities, subnets.Entities[0]) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(subnets, nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "found more than one (2) subnet with name", }, { name: "empty image name", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) machineConf.Spec.Image.Name = nil clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing image name", }, { name: "empty image uuid", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) machineConf.Spec.Image.Type = anywherev1.NutanixIdentifierUUID machineConf.Spec.Image.UUID = nil clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing image uuid", }, { name: "invalid image identifier type", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) machineConf.Spec.Image.Type = "notanidentifier" clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "invalid image identifier type", }, { name: "list image request failed", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), 
gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(nil, errors.New("image not found")) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find image by name", }, { name: "list image request did not find match", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(&v3.ImageListIntentResponse{}, nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find image by name", }, { name: "duplicate image found", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) images := fakeImageList() images.Entities = append(images.Entities, images.Entities[0]) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(images, nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "found more than one (2) image with name", }, { name: "filters out prism central clusters", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { clusters := fakeClusterList() tmp, err := json.Marshal(clusters.Entities[0]) assert.NoError(t, err) var cluster v3.ClusterIntentResponse err = json.Unmarshal(tmp, &cluster) assert.NoError(t, err) cluster.Status.Resources.Config.ServiceList = []*string{utils.StringPtr("PRISM_CENTRAL")} clusters.Entities = append(clusters.Entities, &cluster) mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(clusters, nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "", }, { name: "empty project name", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) machineConf.Spec.Project = &anywherev1.NutanixResourceIdentifier{ Type: 
anywherev1.NutanixIdentifierName, Name: nil, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing project name", }, { name: "empty project uuid", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) machineConf.Spec.Project = &anywherev1.NutanixResourceIdentifier{ Type: anywherev1.NutanixIdentifierUUID, UUID: nil, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing project uuid", }, { name: "invalid project identifier type", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) machineConf.Spec.Project = &anywherev1.NutanixResourceIdentifier{ Type: "notatype", UUID: nil, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "invalid project identifier type", }, { name: "list project request failed", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) mockClient.EXPECT().ListProject(gomock.Any(), gomock.Any()).Return(nil, errors.New("project not found")) machineConf.Spec.Project = &anywherev1.NutanixResourceIdentifier{ Type: anywherev1.NutanixIdentifierName, Name: ptr.String("notaproject"), } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find project by name", }, { name: "list project request did not find match", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) mockClient.EXPECT().ListProject(gomock.Any(), gomock.Any()).Return(&v3.ProjectListResponse{}, nil) machineConf.Spec.Project = &anywherev1.NutanixResourceIdentifier{ Type: anywherev1.NutanixIdentifierName, Name: ptr.String("notaproject"), 
} clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find project by name", }, { name: "duplicate project found", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) projects := fakeProjectList() projects.Entities = append(projects.Entities, projects.Entities[0]) mockClient.EXPECT().ListProject(gomock.Any(), gomock.Any()).Return(projects, nil) machineConf.Spec.Project = &anywherev1.NutanixResourceIdentifier{ Type: anywherev1.NutanixIdentifierName, Name: ptr.String("project"), } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "found more than one (2) project with name", }, { name: "empty category key", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) machineConf.Spec.AdditionalCategories = []anywherev1.NutanixCategoryIdentifier{ { Key: "", Value: "", }, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing category key", }, { name: "empty category value", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) machineConf.Spec.AdditionalCategories = []anywherev1.NutanixCategoryIdentifier{ { Key: "key", Value: "", }, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "missing category value", }, { name: "get category key failed", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) mockClient.EXPECT().GetCategoryKey(gomock.Any(), gomock.Any()).Return(nil, errors.New("category key not found")) machineConf.Spec.AdditionalCategories = []anywherev1.NutanixCategoryIdentifier{ { Key: "nonexistent", Value: 
"value", }, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find category with key", }, { name: "get category value failed", setup: func(machineConf *anywherev1.NutanixMachineConfig, mockClient *mocknutanix.MockClient, validator *mockCrypto.MockTlsValidator, transport *mocknutanix.MockRoundTripper) *Validator { mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).Return(fakeClusterList(), nil) mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).Return(fakeSubnetList(), nil) mockClient.EXPECT().ListImage(gomock.Any(), gomock.Any()).Return(fakeImageList(), nil) categoryKey := v3.CategoryKeyStatus{ Name: ptr.String("key"), } mockClient.EXPECT().GetCategoryKey(gomock.Any(), gomock.Any()).Return(&categoryKey, nil) mockClient.EXPECT().GetCategoryValue(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("category value not found")) machineConf.Spec.AdditionalCategories = []anywherev1.NutanixCategoryIdentifier{ { Key: "key", Value: "nonexistent", }, } clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} return NewValidator(clientCache, validator, &http.Client{Transport: transport}) }, expectedError: "failed to find category value", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { machineConfig := &anywherev1.NutanixMachineConfig{} err := yaml.Unmarshal([]byte(nutanixMachineConfigSpec), machineConfig) require.NoError(t, err) mockClient := mocknutanix.NewMockClient(ctrl) validator := tc.setup(machineConfig, mockClient, mockCrypto.NewMockTlsValidator(ctrl), mocknutanix.NewMockRoundTripper(ctrl)) err = validator.ValidateMachineConfig(context.Background(), mockClient, machineConfig) if tc.expectedError != "" { assert.Contains(t, err.Error(), tc.expectedError) } else { assert.NoError(t, err) } }) } } func TestNutanixValidatorValidateDatacenterConfig(t *testing.T) { tests := []struct { name string dcConfFile string expectErr bool }{ { name: "valid datacenter config without trust bundle", dcConfFile: nutanixDatacenterConfigSpec, }, { name: "valid datacenter config with trust bundle", dcConfFile: nutanixDatacenterConfigSpecWithTrustBundle, }, { name: "valid datacenter config with insecure", dcConfFile: nutanixDatacenterConfigSpecWithInsecure, }, { name: "valid datacenter config with invalid port", dcConfFile: nutanixDatacenterConfigSpecWithInvalidPort, expectErr: true, }, { name: "valid datacenter config with invalid endpoint", dcConfFile: nutanixDatacenterConfigSpecWithInvalidEndpoint, expectErr: true, }, { name: "nil credentialRef", dcConfFile: nutanixDatacenterConfigSpecWithNoCredentialRef, expectErr: true, }, { name: "invalid credentialRef kind", dcConfFile: nutanixDatacenterConfigSpecWithInvalidCredentialRefKind, expectErr: true, }, { name: "empty credentialRef name", dcConfFile: nutanixDatacenterConfigSpecWithEmptyCredentialRefName, expectErr: true, }, } ctrl := gomock.NewController(t) mockClient := mocknutanix.NewMockClient(ctrl) mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes() mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} 
clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient) require.NotNil(t, validator) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(tc.dcConfFile), dcConf) require.NoError(t, err) err = validator.ValidateDatacenterConfig(context.Background(), clientCache.clients["test"], dcConf) if tc.expectErr { assert.Error(t, err, tc.name) } else { assert.NoError(t, err, tc.name) } }) } } func TestNutanixValidatorValidateDatacenterConfigWithInvalidCreds(t *testing.T) { tests := []struct { name string dcConfFile string expectErr bool }{ { name: "valid datacenter config without trust bundle", dcConfFile: nutanixDatacenterConfigSpec, expectErr: true, }, } ctrl := gomock.NewController(t) mockClient := mocknutanix.NewMockClient(ctrl) mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, errors.New("GetCurrentLoggedInUser returned error")).AnyTimes() mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() mockTransport := mocknutanix.NewMockRoundTripper(ctrl) mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() mockHTTPClient := &http.Client{Transport: mockTransport} clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient) require.NotNil(t, validator) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(tc.dcConfFile), dcConf) require.NoError(t, err) err = validator.ValidateDatacenterConfig(context.Background(), clientCache.clients["test"], dcConf) if tc.expectErr { assert.Error(t, err, tc.name) } else { assert.NoError(t, err, tc.name) } }) } }
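// Illustrative, in-package sketch of wiring a Validator by hand, mirroring the test
// setup above; real callers obtain the client cache and TLS validator from the
// provider's constructors. The map key and variable names below are placeholders,
// not part of the API:
//
//	cache := &ClientCache{clients: map[string]Client{"example": prismClient}}
//	v := NewValidator(cache, tlsValidator, &http.Client{Transport: transport})
//	err := v.ValidateDatacenterConfig(ctx, prismClient, datacenterConfig)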
673
eks-anywhere
aws
Go
package nutanix

import (
	"context"
	"fmt"
	"time"

	"github.com/go-logr/logr"
	"github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
	"k8s.io/apimachinery/pkg/api/equality"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/clusterapi"
	capiyaml "github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
	"github.com/aws/eks-anywhere/pkg/providers/common"
	"github.com/aws/eks-anywhere/pkg/yamlutil"
)

type (
	// Workers represents the nutanix specific CAPI spec for worker nodes.
	Workers        = clusterapi.Workers[*v1beta1.NutanixMachineTemplate]
	workersBuilder = capiyaml.WorkersBuilder[*v1beta1.NutanixMachineTemplate]
)

// WorkersSpec generates a nutanix specific CAPI spec for an eks-a cluster's worker nodes.
// It talks to the cluster with a client to detect changes in immutable objects and
// generates new names for them.
func WorkersSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, spec *cluster.Spec) (*Workers, error) {
	ndcs := spec.NutanixDatacenter.Spec
	machineConfigs := spec.NutanixMachineConfigs
	wnmcs := make(map[string]v1alpha1.NutanixMachineConfigSpec, len(spec.Cluster.Spec.WorkerNodeGroupConfigurations))

	for _, machineConfig := range machineConfigs {
		machineConfig.SetDefaults()
	}

	creds := GetCredsFromEnv()
	templateBuilder := NewNutanixTemplateBuilder(&ndcs, nil, nil, wnmcs, creds, time.Now)
	workloadTemplateNames, kubeadmconfigTemplateNames := getTemplateNames(spec, templateBuilder, machineConfigs)

	workersYaml, err := templateBuilder.GenerateCAPISpecWorkers(spec, workloadTemplateNames, kubeadmconfigTemplateNames)
	if err != nil {
		return nil, err
	}

	workers, err := parseWorkersYaml(logger, workersYaml)
	if err != nil {
		return nil, fmt.Errorf("parsing nutanix CAPI workers yaml: %w", err)
	}

	if err = workers.UpdateImmutableObjectNames(ctx, client, getMachineTemplate, machineTemplateEquals); err != nil {
		return nil, fmt.Errorf("updating nutanix worker immutable object names: %w", err)
	}

	return workers, nil
}

func getTemplateNames(spec *cluster.Spec, templateBuilder *TemplateBuilder, machineConfigs map[string]*v1alpha1.NutanixMachineConfig) (map[string]string, map[string]string) {
	workloadTemplateNames := make(map[string]string, len(spec.Cluster.Spec.WorkerNodeGroupConfigurations))
	kubeadmconfigTemplateNames := make(map[string]string, len(spec.Cluster.Spec.WorkerNodeGroupConfigurations))
	for _, workerNodeGroupConfiguration := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
		workloadTemplateNames[workerNodeGroupConfiguration.Name] = common.WorkerMachineTemplateName(spec.Cluster.Name, workerNodeGroupConfiguration.Name, templateBuilder.now)
		kubeadmconfigTemplateNames[workerNodeGroupConfiguration.Name] = common.KubeadmConfigTemplateName(spec.Cluster.Name, workerNodeGroupConfiguration.Name, templateBuilder.now)
		templateBuilder.workerNodeGroupMachineSpecs[workerNodeGroupConfiguration.MachineGroupRef.Name] = machineConfigs[workerNodeGroupConfiguration.MachineGroupRef.Name].Spec
	}

	return workloadTemplateNames, kubeadmconfigTemplateNames
}

func parseWorkersYaml(logger logr.Logger, workersYaml []byte) (*Workers, error) {
	parser, builder, err := newWorkersParserAndBuilder(logger)
	if err != nil {
		return nil, err
	}

	if err = parser.Parse(workersYaml, builder); err != nil {
		return nil, fmt.Errorf("parsing nutanix CAPI workers yaml: %w", err)
	}

	return builder.Workers, nil
}

func newWorkersParserAndBuilder(logger logr.Logger) (*yamlutil.Parser, *workersBuilder, error) {
	parser, builder, err := capiyaml.NewWorkersParserAndBuilder(
		logger,
		machineTemplateMapping(),
	)
	if err != nil {
		return nil, nil, fmt.Errorf("building nutanix workers parser and builder: %w", err)
	}

	return parser, builder, nil
}

func machineTemplateMapping() yamlutil.Mapping[*v1beta1.NutanixMachineTemplate] {
	return yamlutil.NewMapping(
		"NutanixMachineTemplate",
		func() *v1beta1.NutanixMachineTemplate {
			return &v1beta1.NutanixMachineTemplate{}
		},
	)
}

func getMachineTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*v1beta1.NutanixMachineTemplate, error) {
	m := &v1beta1.NutanixMachineTemplate{}
	if err := client.Get(ctx, name, namespace, m); err != nil {
		return nil, fmt.Errorf("reading nutanixMachineTemplate: %w", err)
	}

	return m, nil
}

func machineTemplateEquals(new, old *v1beta1.NutanixMachineTemplate) bool {
	return equality.Semantic.DeepDerivative(new.Spec, old.Spec)
}
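// Illustrative sketch only: how a caller (for example a reconciler) might consume
// WorkersSpec. The ctx, log, client, and spec values are assumed to already exist in
// the caller's scope; nothing here is part of the provider itself.
func exampleBuildWorkers(ctx context.Context, log logr.Logger, client kubernetes.Client, spec *cluster.Spec) error {
	workers, err := WorkersSpec(ctx, log, client, spec)
	if err != nil {
		return err
	}
	// Each group pairs a MachineDeployment with its (possibly renamed, since the
	// templates are immutable) NutanixMachineTemplate and KubeadmConfigTemplate.
	for _, group := range workers.Groups {
		log.Info("built worker group", "machineDeployment", group.MachineDeployment.Name)
	}
	return nil
}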
119
eks-anywhere
aws
Go
package nutanix

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/aws/eks-anywhere/internal/test"
	"github.com/aws/eks-anywhere/pkg/constants"
)

func TestWorkersSpec(t *testing.T) {
	t.Setenv(constants.EksaNutanixUsernameKey, "admin")
	t.Setenv(constants.EksaNutanixPasswordKey, "password")
	logger := test.NewNullLogger()
	client := test.NewFakeKubeClient()
	spec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-multiple-worker-md.yaml")
	workers, err := WorkersSpec(context.TODO(), logger, client, spec)
	require.NoError(t, err)
	assert.Len(t, workers.Groups, 2)
}
25
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/nutanix/client.go // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" v3 "github.com/nutanix-cloud-native/prism-go-client/v3" ) // MockClient is a mock of Client interface. type MockClient struct { ctrl *gomock.Controller recorder *MockClientMockRecorder } // MockClientMockRecorder is the mock recorder for MockClient. type MockClientMockRecorder struct { mock *MockClient } // NewMockClient creates a new mock instance. func NewMockClient(ctrl *gomock.Controller) *MockClient { mock := &MockClient{ctrl: ctrl} mock.recorder = &MockClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } // GetCategoryKey mocks base method. func (m *MockClient) GetCategoryKey(ctx context.Context, name string) (*v3.CategoryKeyStatus, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCategoryKey", ctx, name) ret0, _ := ret[0].(*v3.CategoryKeyStatus) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCategoryKey indicates an expected call of GetCategoryKey. func (mr *MockClientMockRecorder) GetCategoryKey(ctx, name interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCategoryKey", reflect.TypeOf((*MockClient)(nil).GetCategoryKey), ctx, name) } // GetCategoryQuery mocks base method. func (m *MockClient) GetCategoryQuery(ctx context.Context, query *v3.CategoryQueryInput) (*v3.CategoryQueryResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCategoryQuery", ctx, query) ret0, _ := ret[0].(*v3.CategoryQueryResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCategoryQuery indicates an expected call of GetCategoryQuery. func (mr *MockClientMockRecorder) GetCategoryQuery(ctx, query interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCategoryQuery", reflect.TypeOf((*MockClient)(nil).GetCategoryQuery), ctx, query) } // GetCategoryValue mocks base method. func (m *MockClient) GetCategoryValue(ctx context.Context, name, value string) (*v3.CategoryValueStatus, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCategoryValue", ctx, name, value) ret0, _ := ret[0].(*v3.CategoryValueStatus) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCategoryValue indicates an expected call of GetCategoryValue. func (mr *MockClientMockRecorder) GetCategoryValue(ctx, name, value interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCategoryValue", reflect.TypeOf((*MockClient)(nil).GetCategoryValue), ctx, name, value) } // GetCluster mocks base method. func (m *MockClient) GetCluster(ctx context.Context, uuid string) (*v3.ClusterIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCluster", ctx, uuid) ret0, _ := ret[0].(*v3.ClusterIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCluster indicates an expected call of GetCluster. func (mr *MockClientMockRecorder) GetCluster(ctx, uuid interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCluster", reflect.TypeOf((*MockClient)(nil).GetCluster), ctx, uuid) } // GetCurrentLoggedInUser mocks base method. 
func (m *MockClient) GetCurrentLoggedInUser(ctx context.Context) (*v3.UserIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCurrentLoggedInUser", ctx) ret0, _ := ret[0].(*v3.UserIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCurrentLoggedInUser indicates an expected call of GetCurrentLoggedInUser. func (mr *MockClientMockRecorder) GetCurrentLoggedInUser(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentLoggedInUser", reflect.TypeOf((*MockClient)(nil).GetCurrentLoggedInUser), ctx) } // GetImage mocks base method. func (m *MockClient) GetImage(ctx context.Context, uuid string) (*v3.ImageIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetImage", ctx, uuid) ret0, _ := ret[0].(*v3.ImageIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // GetImage indicates an expected call of GetImage. func (mr *MockClientMockRecorder) GetImage(ctx, uuid interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImage", reflect.TypeOf((*MockClient)(nil).GetImage), ctx, uuid) } // GetProject mocks base method. func (m *MockClient) GetProject(ctx context.Context, uuid string) (*v3.Project, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetProject", ctx, uuid) ret0, _ := ret[0].(*v3.Project) ret1, _ := ret[1].(error) return ret0, ret1 } // GetProject indicates an expected call of GetProject. func (mr *MockClientMockRecorder) GetProject(ctx, uuid interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProject", reflect.TypeOf((*MockClient)(nil).GetProject), ctx, uuid) } // GetSubnet mocks base method. func (m *MockClient) GetSubnet(ctx context.Context, uuid string) (*v3.SubnetIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSubnet", ctx, uuid) ret0, _ := ret[0].(*v3.SubnetIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // GetSubnet indicates an expected call of GetSubnet. func (mr *MockClientMockRecorder) GetSubnet(ctx, uuid interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnet", reflect.TypeOf((*MockClient)(nil).GetSubnet), ctx, uuid) } // ListCategories mocks base method. func (m *MockClient) ListCategories(ctx context.Context, getEntitiesRequest *v3.CategoryListMetadata) (*v3.CategoryKeyListResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListCategories", ctx, getEntitiesRequest) ret0, _ := ret[0].(*v3.CategoryKeyListResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListCategories indicates an expected call of ListCategories. func (mr *MockClientMockRecorder) ListCategories(ctx, getEntitiesRequest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCategories", reflect.TypeOf((*MockClient)(nil).ListCategories), ctx, getEntitiesRequest) } // ListCategoryValues mocks base method. func (m *MockClient) ListCategoryValues(ctx context.Context, name string, getEntitiesRequest *v3.CategoryListMetadata) (*v3.CategoryValueListResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListCategoryValues", ctx, name, getEntitiesRequest) ret0, _ := ret[0].(*v3.CategoryValueListResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListCategoryValues indicates an expected call of ListCategoryValues. 
func (mr *MockClientMockRecorder) ListCategoryValues(ctx, name, getEntitiesRequest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCategoryValues", reflect.TypeOf((*MockClient)(nil).ListCategoryValues), ctx, name, getEntitiesRequest) } // ListCluster mocks base method. func (m *MockClient) ListCluster(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ClusterListIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListCluster", ctx, getEntitiesRequest) ret0, _ := ret[0].(*v3.ClusterListIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListCluster indicates an expected call of ListCluster. func (mr *MockClientMockRecorder) ListCluster(ctx, getEntitiesRequest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCluster", reflect.TypeOf((*MockClient)(nil).ListCluster), ctx, getEntitiesRequest) } // ListImage mocks base method. func (m *MockClient) ListImage(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ImageListIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListImage", ctx, getEntitiesRequest) ret0, _ := ret[0].(*v3.ImageListIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListImage indicates an expected call of ListImage. func (mr *MockClientMockRecorder) ListImage(ctx, getEntitiesRequest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImage", reflect.TypeOf((*MockClient)(nil).ListImage), ctx, getEntitiesRequest) } // ListProject mocks base method. func (m *MockClient) ListProject(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ProjectListResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListProject", ctx, getEntitiesRequest) ret0, _ := ret[0].(*v3.ProjectListResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListProject indicates an expected call of ListProject. func (mr *MockClientMockRecorder) ListProject(ctx, getEntitiesRequest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProject", reflect.TypeOf((*MockClient)(nil).ListProject), ctx, getEntitiesRequest) } // ListSubnet mocks base method. func (m *MockClient) ListSubnet(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.SubnetListIntentResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListSubnet", ctx, getEntitiesRequest) ret0, _ := ret[0].(*v3.SubnetListIntentResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // ListSubnet indicates an expected call of ListSubnet. func (mr *MockClientMockRecorder) ListSubnet(ctx, getEntitiesRequest interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSubnet", reflect.TypeOf((*MockClient)(nil).ListSubnet), ctx, getEntitiesRequest) }
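// Illustrative sketch (not generated code): the usual gomock flow for this mock in a
// test. The canned response below is an assumption for the example's sake:
//
//	ctrl := gomock.NewController(t)
//	client := NewMockClient(ctrl)
//	client.EXPECT().
//		ListSubnet(gomock.Any(), gomock.Any()).
//		Return(&v3.SubnetListIntentResponse{}, nil)
//	// Code under test calls client.ListSubnet(ctx, metadata) and receives the canned
//	// response; unexpected or missing calls fail the test via the controller.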
247
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: net/http (interfaces: RoundTripper) // Package mocks is a generated GoMock package. package mocks import ( http "net/http" reflect "reflect" gomock "github.com/golang/mock/gomock" ) // MockRoundTripper is a mock of RoundTripper interface. type MockRoundTripper struct { ctrl *gomock.Controller recorder *MockRoundTripperMockRecorder } // MockRoundTripperMockRecorder is the mock recorder for MockRoundTripper. type MockRoundTripperMockRecorder struct { mock *MockRoundTripper } // NewMockRoundTripper creates a new mock instance. func NewMockRoundTripper(ctrl *gomock.Controller) *MockRoundTripper { mock := &MockRoundTripper{ctrl: ctrl} mock.recorder = &MockRoundTripperMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRoundTripper) EXPECT() *MockRoundTripperMockRecorder { return m.recorder } // RoundTrip mocks base method. func (m *MockRoundTripper) RoundTrip(arg0 *http.Request) (*http.Response, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RoundTrip", arg0) ret0, _ := ret[0].(*http.Response) ret1, _ := ret[1].(error) return ret0, ret1 } // RoundTrip indicates an expected call of RoundTrip. func (mr *MockRoundTripperMockRecorder) RoundTrip(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RoundTrip", reflect.TypeOf((*MockRoundTripper)(nil).RoundTrip), arg0) }
51
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/nutanix/validator.go // Package mocks is a generated GoMock package. package mocks import ( reflect "reflect" v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" gomock "github.com/golang/mock/gomock" ) // MockIPValidator is a mock of IPValidator interface. type MockIPValidator struct { ctrl *gomock.Controller recorder *MockIPValidatorMockRecorder } // MockIPValidatorMockRecorder is the mock recorder for MockIPValidator. type MockIPValidatorMockRecorder struct { mock *MockIPValidator } // NewMockIPValidator creates a new mock instance. func NewMockIPValidator(ctrl *gomock.Controller) *MockIPValidator { mock := &MockIPValidator{ctrl: ctrl} mock.recorder = &MockIPValidatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockIPValidator) EXPECT() *MockIPValidatorMockRecorder { return m.recorder } // ValidateControlPlaneIPUniqueness mocks base method. func (m *MockIPValidator) ValidateControlPlaneIPUniqueness(cluster *v1alpha1.Cluster) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateControlPlaneIPUniqueness", cluster) ret0, _ := ret[0].(error) return ret0 } // ValidateControlPlaneIPUniqueness indicates an expected call of ValidateControlPlaneIPUniqueness. func (mr *MockIPValidatorMockRecorder) ValidateControlPlaneIPUniqueness(cluster interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIPUniqueness", reflect.TypeOf((*MockIPValidator)(nil).ValidateControlPlaneIPUniqueness), cluster) }
50
eks-anywhere
aws
Go
package reconciler import ( "context" "fmt" "reflect" "github.com/go-logr/logr" "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller" "github.com/aws/eks-anywhere/pkg/controller/clientutil" "github.com/aws/eks-anywhere/pkg/controller/clusters" "github.com/aws/eks-anywhere/pkg/controller/serverside" "github.com/aws/eks-anywhere/pkg/providers/nutanix" ) // CNIReconciler is an interface for reconciling CNI in the Tinkerbell cluster reconciler. type CNIReconciler interface { Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) } // RemoteClientRegistry is an interface that defines methods for remote clients. type RemoteClientRegistry interface { GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) } // IPValidator is an interface that defines methods to validate the control plane IP. type IPValidator interface { ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) } // Reconciler reconciles a Nutanix cluster. type Reconciler struct { client client.Client validator *nutanix.Validator cniReconciler CNIReconciler remoteClientRegistry RemoteClientRegistry ipValidator IPValidator *serverside.ObjectApplier } // New defines a new Nutanix reconciler. func New(client client.Client, validator *nutanix.Validator, cniReconciler CNIReconciler, registry RemoteClientRegistry, ipValidator IPValidator) *Reconciler { return &Reconciler{ client: client, validator: validator, cniReconciler: cniReconciler, remoteClientRegistry: registry, ipValidator: ipValidator, ObjectApplier: serverside.NewObjectApplier(client), } } func getSecret(ctx context.Context, kubectl client.Client, secretName, secretNS string) (*apiv1.Secret, error) { secret := &apiv1.Secret{} secretKey := client.ObjectKey{ Namespace: secretNS, Name: secretName, } if err := kubectl.Get(ctx, secretKey, secret); err != nil { return nil, err } return secret, nil } // GetNutanixCredsFromSecret returns the Nutanix credentials from a secret. 
func GetNutanixCredsFromSecret(ctx context.Context, kubectl client.Client, secretName, secretNS string) (credentials.BasicAuthCredential, error) { secret, err := getSecret(ctx, kubectl, secretName, secretNS) if err != nil { return credentials.BasicAuthCredential{}, fmt.Errorf("failed getting nutanix credentials secret: %v", err) } creds, err := credentials.ParseCredentials(secret.Data["credentials"]) if err != nil { return credentials.BasicAuthCredential{}, fmt.Errorf("failed parsing nutanix credentials: %v", err) } return credentials.BasicAuthCredential{PrismCentral: credentials.PrismCentralBasicAuth{ BasicAuth: credentials.BasicAuth{ Username: creds.Username, Password: creds.Password, }, }}, nil } func (r *Reconciler) reconcileClusterSecret(ctx context.Context, log logr.Logger, c *cluster.Spec) (controller.Result, error) { eksaSecret := &apiv1.Secret{} eksaSecretKey := client.ObjectKey{ Namespace: constants.EksaSystemNamespace, Name: nutanix.EKSASecretName(c), } if err := r.client.Get(ctx, eksaSecretKey, eksaSecret); err != nil { log.Error(err, "Failed to get EKS-A secret %s/%s", constants.EksaSystemNamespace, c.NutanixDatacenter.Spec.CredentialRef.Name) return controller.Result{}, err } capxSecret := &apiv1.Secret{} capxSecretKey := client.ObjectKey{ Namespace: constants.EksaSystemNamespace, Name: nutanix.CAPXSecretName(c), } if err := r.client.Get(ctx, capxSecretKey, capxSecret); err == nil { if reflect.DeepEqual(eksaSecret.Data, capxSecret.Data) { return controller.Result{}, nil } capxSecret.Data = eksaSecret.Data if err := r.client.Update(ctx, capxSecret); err != nil { log.Error(err, "Failed to update CAPX secret %s/%s", constants.EksaSystemNamespace, c.Cluster.Name) return controller.Result{}, err } return controller.Result{}, nil } capxSecret = &apiv1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: constants.EksaSystemNamespace, Name: nutanix.CAPXSecretName(c), }, Data: eksaSecret.Data, } if err := r.client.Create(ctx, capxSecret); err != nil { log.Error(err, "Failed to create CAPX secret %s/%s", constants.EksaSystemNamespace, c.Cluster.Name) return controller.Result{}, err } return controller.Result{}, nil } // Reconcile reconciles the cluster to the desired state. func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) { log = log.WithValues("provider", "nutanix") clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c) if err != nil { return controller.Result{}, err } return controller.NewPhaseRunner[*cluster.Spec]().Register( r.reconcileClusterSecret, r.ipValidator.ValidateControlPlaneIP, r.ValidateClusterSpec, clusters.CleanupStatusAfterValidate, r.ReconcileControlPlane, r.CheckControlPlaneReady, r.ReconcileCNI, r.ReconcileWorkers, ).Run(ctx, log, clusterSpec) } // ReconcileCNI reconciles the CNI to the desired state. func (r *Reconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) { log = log.WithValues("phase", "reconcileCNI") c, err := r.remoteClientRegistry.GetClient(ctx, controller.CapiClusterObjectKey(clusterSpec.Cluster)) if err != nil { return controller.Result{}, err } return r.cniReconciler.Reconcile(ctx, log, c, clusterSpec) } // ValidateClusterSpec performs additional, context-aware validations on the cluster spec. 
func (r *Reconciler) ValidateClusterSpec(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "validateClusterSpec")

	creds, err := GetNutanixCredsFromSecret(ctx, r.client, clusterSpec.NutanixDatacenter.Spec.CredentialRef.Name, constants.EksaSystemNamespace)
	if err != nil {
		return controller.Result{}, err
	}

	if err := r.validator.ValidateClusterSpec(ctx, clusterSpec, creds); err != nil {
		log.Error(err, "Invalid cluster spec", "cluster", clusterSpec.Cluster.Name)
		failureMessage := err.Error()
		clusterSpec.Cluster.Status.FailureMessage = &failureMessage
		return controller.ResultWithReturn(), nil
	}

	return controller.Result{}, nil
}

// ReconcileControlPlane reconciles the control plane to the desired state.
func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileControlPlane")
	cp, err := nutanix.ControlPlaneSpec(ctx, log, clientutil.NewKubeClient(r.client), clusterSpec)
	if err != nil {
		return controller.Result{}, err
	}

	return clusters.ReconcileControlPlane(ctx, r.client, toClientControlPlane(cp))
}

func toClientControlPlane(cp *nutanix.ControlPlane) *clusters.ControlPlane {
	return &clusters.ControlPlane{
		Cluster:                     cp.Cluster,
		ProviderCluster:             cp.ProviderCluster,
		KubeadmControlPlane:         cp.KubeadmControlPlane,
		ControlPlaneMachineTemplate: cp.ControlPlaneMachineTemplate,
		EtcdCluster:                 cp.EtcdCluster,
		EtcdMachineTemplate:         cp.EtcdMachineTemplate,
	}
}

// ReconcileWorkerNodes reconciles the worker nodes to the desired state.
func (r *Reconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, eksCluster *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "nutanix", "reconcile type", "workers")
	clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), eksCluster)
	if err != nil {
		return controller.Result{}, err
	}

	return controller.NewPhaseRunner[*cluster.Spec]().Register(
		r.ValidateClusterSpec,
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// ReconcileWorkers reconciles the workers to the desired state.
func (r *Reconciler) ReconcileWorkers(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
	if spec.NutanixDatacenter == nil {
		return controller.Result{}, nil
	}
	log = log.WithValues("phase", "reconcileWorkers")
	log.Info("Applying worker CAPI objects")
	w, err := nutanix.WorkersSpec(ctx, log, clientutil.NewKubeClient(r.client), spec)
	if err != nil {
		return controller.Result{}, err
	}

	return clusters.ReconcileWorkersForEKSA(ctx, log, r.client, spec.Cluster, clusters.ToWorkers(w))
}

// CheckControlPlaneReady checks whether the control plane for an eks-a cluster is ready or not.
// Requeues with the appropriate wait times whenever the cluster is not ready yet.
func (r *Reconciler) CheckControlPlaneReady(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "checkControlPlaneReady")
	return clusters.CheckControlPlaneReady(ctx, r.client, log, clusterSpec.Cluster)
}
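A minimal sketch of how the credentials flow above can be exercised against a fake controller-runtime client. The secret name, namespace, import path of this package, and the JSON payload are illustrative assumptions; the real payload schema is whatever prism-go-client's credentials.ParseCredentials accepts.

package reconciler_test

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/aws/eks-anywhere/pkg/providers/nutanix/reconciler"
)

func ExampleGetNutanixCredsFromSecret() {
	// Illustrative payload only; the real schema is defined by prism-go-client.
	payload := []byte(`[{"type":"basic_auth","data":{"prismCentral":{"username":"admin","password":"pass"}}}]`)

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "nutanix-creds", Namespace: "eksa-system"},
		Data:       map[string][]byte{"credentials": payload},
	}

	// A fake client seeded with the secret stands in for a live cluster.
	c := fake.NewClientBuilder().WithObjects(secret).Build()

	creds, err := reconciler.GetNutanixCredsFromSecret(context.Background(), c, "nutanix-creds", "eksa-system")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(creds.PrismCentral.BasicAuth.Username)
}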
246
eks-anywhere
aws
Go
package snow import ( "fmt" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/go-logr/logr" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/constants" snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1" ) const ( // SnowClusterKind is the kubernetes object kind for CAPAS Cluster. SnowClusterKind = "AWSSnowCluster" // SnowMachineTemplateKind is the kubernetes object kind for CAPAS machine template. SnowMachineTemplateKind = "AWSSnowMachineTemplate" // SnowIPPoolKind is the kubernetes object kind for CAPAS IP pool. SnowIPPoolKind = "AWSSnowIPPool" ignoreEtcdKubernetesManifestFolderPreflightError = "DirAvailable--etc-kubernetes-manifests" ) // CAPICluster generates the CAPICluster object for snow provider. func CAPICluster(clusterSpec *cluster.Spec, snowCluster *snowv1.AWSSnowCluster, kubeadmControlPlane *controlplanev1.KubeadmControlPlane, etcdCluster *etcdv1.EtcdadmCluster) *clusterv1.Cluster { return clusterapi.Cluster(clusterSpec, snowCluster, kubeadmControlPlane, etcdCluster) } // KubeadmControlPlane generates the kubeadmControlPlane object for snow provider from clusterSpec and snowMachineTemplate. func KubeadmControlPlane(log logr.Logger, clusterSpec *cluster.Spec, snowMachineTemplate *snowv1.AWSSnowMachineTemplate) (*controlplanev1.KubeadmControlPlane, error) { kcp, err := clusterapi.KubeadmControlPlane(clusterSpec, snowMachineTemplate) if err != nil { return nil, fmt.Errorf("generating KubeadmControlPlane: %v", err) } if err := clusterapi.SetKubeVipInKubeadmControlPlane(kcp, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, clusterSpec.VersionsBundle.Snow.KubeVip.VersionedImage()); err != nil { return nil, fmt.Errorf("setting kube-vip: %v", err) } initConfigKubeletExtraArg := kcp.Spec.KubeadmConfigSpec.InitConfiguration.NodeRegistration.KubeletExtraArgs initConfigKubeletExtraArg["provider-id"] = "aws-snow:////'{{ ds.meta_data.instance_id }}'" joinConfigKubeletExtraArg := kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.KubeletExtraArgs joinConfigKubeletExtraArg["provider-id"] = "aws-snow:////'{{ ds.meta_data.instance_id }}'" addStackedEtcdExtraArgsInKubeadmControlPlane(kcp, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration) machineConfig := clusterSpec.SnowMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) osFamily := machineConfig.OSFamily() switch osFamily { case v1alpha1.Bottlerocket: clusterapi.SetProxyConfigInKubeadmControlPlaneForBottlerocket(kcp, clusterSpec.Cluster) clusterapi.SetRegistryMirrorInKubeadmControlPlaneForBottlerocket(kcp, clusterSpec.Cluster.Spec.RegistryMirrorConfiguration) clusterapi.SetBottlerocketInKubeadmControlPlane(kcp, clusterSpec.VersionsBundle) clusterapi.SetBottlerocketAdminContainerImageInKubeadmControlPlane(kcp, clusterSpec.VersionsBundle) clusterapi.SetBottlerocketControlContainerImageInKubeadmControlPlane(kcp, clusterSpec.VersionsBundle) clusterapi.SetUnstackedEtcdConfigInKubeadmControlPlaneForBottlerocket(kcp, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration) 
addBottlerocketBootstrapSnowInKubeadmControlPlane(kcp, clusterSpec.VersionsBundle.Snow.BottlerocketBootstrapSnow) clusterapi.SetBottlerocketHostConfigInKubeadmControlPlane(kcp, machineConfig.Spec.HostOSConfiguration) case v1alpha1.Ubuntu: kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands, "/etc/eks/bootstrap.sh", ) if err := clusterapi.SetProxyConfigInKubeadmControlPlaneForUbuntu(kcp, clusterSpec.Cluster); err != nil { return nil, err } if err := clusterapi.SetRegistryMirrorInKubeadmControlPlaneForUbuntu(kcp, clusterSpec.Cluster.Spec.RegistryMirrorConfiguration); err != nil { return nil, err } clusterapi.CreateContainerdConfigFileInKubeadmControlPlane(kcp, clusterSpec.Cluster) clusterapi.RestartContainerdInKubeadmControlPlane(kcp, clusterSpec.Cluster) clusterapi.SetUnstackedEtcdConfigInKubeadmControlPlaneForUbuntu(kcp, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration) kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = append( kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors, ignoreEtcdKubernetesManifestFolderPreflightError, ) default: log.Info("Warning: unsupported OS family when setting up KubeadmControlPlane", "OS family", osFamily) } return kcp, nil } // KubeadmConfigTemplate generates the kubeadmConfigTemplate object for snow provider from clusterSpec and workerNodeGroupConfig. func KubeadmConfigTemplate(log logr.Logger, clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) (*bootstrapv1.KubeadmConfigTemplate, error) { kct, err := clusterapi.KubeadmConfigTemplate(clusterSpec, workerNodeGroupConfig) if err != nil { return nil, fmt.Errorf("generating KubeadmConfigTemplate: %v", err) } joinConfigKubeletExtraArg := kct.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs joinConfigKubeletExtraArg["provider-id"] = "aws-snow:////'{{ ds.meta_data.instance_id }}'" machineConfig := clusterSpec.SnowMachineConfig(workerNodeGroupConfig.MachineGroupRef.Name) osFamily := machineConfig.OSFamily() switch osFamily { case v1alpha1.Bottlerocket: clusterapi.SetProxyConfigInKubeadmConfigTemplateForBottlerocket(kct, clusterSpec.Cluster) clusterapi.SetRegistryMirrorInKubeadmConfigTemplateForBottlerocket(kct, clusterSpec.Cluster.Spec.RegistryMirrorConfiguration) clusterapi.SetBottlerocketInKubeadmConfigTemplate(kct, clusterSpec.VersionsBundle) clusterapi.SetBottlerocketAdminContainerImageInKubeadmConfigTemplate(kct, clusterSpec.VersionsBundle) clusterapi.SetBottlerocketControlContainerImageInKubeadmConfigTemplate(kct, clusterSpec.VersionsBundle) addBottlerocketBootstrapSnowInKubeadmConfigTemplate(kct, clusterSpec.VersionsBundle.Snow.BottlerocketBootstrapSnow) clusterapi.SetBottlerocketHostConfigInKubeadmConfigTemplate(kct, machineConfig.Spec.HostOSConfiguration) case v1alpha1.Ubuntu: kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, "/etc/eks/bootstrap.sh", ) if err := clusterapi.SetProxyConfigInKubeadmConfigTemplateForUbuntu(kct, clusterSpec.Cluster); err != nil { return nil, err } if err := clusterapi.SetRegistryMirrorInKubeadmConfigTemplateForUbuntu(kct, clusterSpec.Cluster.Spec.RegistryMirrorConfiguration); err != nil { return nil, err } clusterapi.CreateContainerdConfigFileInKubeadmConfigTemplate(kct, clusterSpec.Cluster) clusterapi.RestartContainerdInKubeadmConfigTemplate(kct, clusterSpec.Cluster) default: log.Info("Warning: unsupported OS family when setting up KubeadmConfigTemplate", 
"OS family", osFamily) } return kct, nil } func machineDeployment(clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration, kubeadmConfigTemplate *bootstrapv1.KubeadmConfigTemplate, snowMachineTemplate *snowv1.AWSSnowMachineTemplate) *clusterv1.MachineDeployment { return clusterapi.MachineDeployment(clusterSpec, workerNodeGroupConfig, kubeadmConfigTemplate, snowMachineTemplate) } // EtcdadmCluster builds an etcdadmCluster based on an eks-a cluster spec and snowMachineTemplate. func EtcdadmCluster(log logr.Logger, clusterSpec *cluster.Spec, snowMachineTemplate *snowv1.AWSSnowMachineTemplate) *etcdv1.EtcdadmCluster { etcd := clusterapi.EtcdadmCluster(clusterSpec, snowMachineTemplate) machineConfig := clusterSpec.SnowMachineConfig(clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name) osFamily := machineConfig.OSFamily() switch osFamily { case v1alpha1.Bottlerocket: clusterapi.SetBottlerocketInEtcdCluster(etcd, clusterSpec.VersionsBundle) clusterapi.SetBottlerocketAdminContainerImageInEtcdCluster(etcd, clusterSpec.VersionsBundle.BottleRocketHostContainers.Admin) clusterapi.SetBottlerocketControlContainerImageInEtcdCluster(etcd, clusterSpec.VersionsBundle.BottleRocketHostContainers.Control) addBottlerocketBootstrapSnowInEtcdCluster(etcd, clusterSpec.VersionsBundle.Snow.BottlerocketBootstrapSnow) clusterapi.SetBottlerocketHostConfigInEtcdCluster(etcd, machineConfig.Spec.HostOSConfiguration) case v1alpha1.Ubuntu: clusterapi.SetUbuntuConfigInEtcdCluster(etcd, clusterSpec.VersionsBundle.KubeDistro.EtcdVersion) etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands = append(etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands, "/etc/eks/bootstrap.sh", ) default: log.Info("Warning: unsupported OS family when setting up EtcdadmCluster", "OS family", osFamily) } return etcd } func SnowCluster(clusterSpec *cluster.Spec, credentialsSecret *v1.Secret) *snowv1.AWSSnowCluster { cluster := &snowv1.AWSSnowCluster{ TypeMeta: metav1.TypeMeta{ APIVersion: clusterapi.InfrastructureAPIVersion(), Kind: SnowClusterKind, }, ObjectMeta: metav1.ObjectMeta{ Name: clusterSpec.Cluster.GetName(), Namespace: constants.EksaSystemNamespace, }, Spec: snowv1.AWSSnowClusterSpec{ Region: "snow", ControlPlaneEndpoint: clusterv1.APIEndpoint{ Host: clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, Port: 6443, }, IdentityRef: &snowv1.AWSSnowIdentityReference{ Name: credentialsSecret.GetName(), Kind: snowv1.AWSSnowIdentityKind(credentialsSecret.GetObjectKind().GroupVersionKind().Kind), }, }, } return cluster } func CredentialsSecret(name, namespace string, credsB64, certsB64 []byte) *v1.Secret { return &v1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: v1.SchemeGroupVersion.String(), Kind: string(snowv1.SecretKind), }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, Data: map[string][]byte{ v1alpha1.SnowCredentialsKey: credsB64, v1alpha1.SnowCertificatesKey: certsB64, }, Type: v1.SecretTypeOpaque, } } func CAPASCredentialsSecret(clusterSpec *cluster.Spec, credsB64, certsB64 []byte) *v1.Secret { s := CredentialsSecret(CredentialsSecretName(clusterSpec), constants.EksaSystemNamespace, credsB64, certsB64) label := map[string]string{ clusterctlv1.ClusterctlMoveLabel: "true", } s.SetLabels(label) return s } func EksaCredentialsSecret(datacenter *v1alpha1.SnowDatacenterConfig, credsB64, certsB64 []byte) *v1.Secret { return CredentialsSecret(datacenter.Spec.IdentityRef.Name, datacenter.GetNamespace(), credsB64, certsB64) } // CAPASIPPools defines a set of CAPAS 
AWSSnowPool objects. type CAPASIPPools map[string]*snowv1.AWSSnowIPPool func (p CAPASIPPools) addPools(dnis []v1alpha1.SnowDirectNetworkInterface, m map[string]*v1alpha1.SnowIPPool) { for _, dni := range dnis { if dni.IPPoolRef != nil { p[dni.IPPoolRef.Name] = toAWSSnowIPPool(m[dni.IPPoolRef.Name]) } } } func buildSnowIPPool(pool v1alpha1.IPPool) snowv1.IPPool { return snowv1.IPPool{ IPStart: &pool.IPStart, IPEnd: &pool.IPEnd, Gateway: &pool.Gateway, Subnet: &pool.Subnet, } } func toAWSSnowIPPool(pool *v1alpha1.SnowIPPool) *snowv1.AWSSnowIPPool { snowPools := make([]snowv1.IPPool, 0, len(pool.Spec.Pools)) for _, p := range pool.Spec.Pools { snowPools = append(snowPools, buildSnowIPPool(p)) } return &snowv1.AWSSnowIPPool{ TypeMeta: metav1.TypeMeta{ APIVersion: clusterapi.InfrastructureAPIVersion(), Kind: SnowIPPoolKind, }, ObjectMeta: metav1.ObjectMeta{ Name: pool.GetName(), Namespace: constants.EksaSystemNamespace, }, Spec: snowv1.AWSSnowIPPoolSpec{ IPPools: snowPools, }, } } func buildDNI(dni v1alpha1.SnowDirectNetworkInterface, capasPools CAPASIPPools) snowv1.AWSSnowDirectNetworkInterface { var ipPoolRef *v1.ObjectReference if dni.IPPoolRef != nil { ipPool := capasPools[dni.IPPoolRef.Name] ipPoolRef = &v1.ObjectReference{ Kind: ipPool.Kind, Name: ipPool.Name, } } return snowv1.AWSSnowDirectNetworkInterface{ Index: dni.Index, VlanID: dni.VlanID, DHCP: dni.DHCP, Primary: dni.Primary, IPPool: ipPoolRef, } } // MachineTemplate builds a snowMachineTemplate based on an eks-a snowMachineConfig and a capasIPPool. func MachineTemplate(name string, machineConfig *v1alpha1.SnowMachineConfig, capasPools CAPASIPPools) *snowv1.AWSSnowMachineTemplate { dnis := make([]snowv1.AWSSnowDirectNetworkInterface, 0, len(machineConfig.Spec.Network.DirectNetworkInterfaces)) for _, dni := range machineConfig.Spec.Network.DirectNetworkInterfaces { dnis = append(dnis, buildDNI(dni, capasPools)) } networkConnector := string(machineConfig.Spec.PhysicalNetworkConnector) m := &snowv1.AWSSnowMachineTemplate{ TypeMeta: metav1.TypeMeta{ APIVersion: clusterapi.InfrastructureAPIVersion(), Kind: SnowMachineTemplateKind, }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: constants.EksaSystemNamespace, }, Spec: snowv1.AWSSnowMachineTemplateSpec{ Template: snowv1.AWSSnowMachineTemplateResource{ Spec: snowv1.AWSSnowMachineSpec{ IAMInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io", InstanceType: string(machineConfig.Spec.InstanceType), SSHKeyName: &machineConfig.Spec.SshKeyName, AMI: snowv1.AWSResourceReference{ ID: &machineConfig.Spec.AMIID, }, CloudInit: snowv1.CloudInit{ InsecureSkipSecretsManager: true, }, PhysicalNetworkConnectorType: &networkConnector, Devices: machineConfig.Spec.Devices, ContainersVolume: machineConfig.Spec.ContainersVolume, NonRootVolumes: machineConfig.Spec.NonRootVolumes, Network: snowv1.AWSSnowNetwork{ DirectNetworkInterfaces: dnis, }, OSFamily: (*snowv1.OSFamily)(&machineConfig.Spec.OSFamily), }, }, }, } if machineConfig.Spec.OSFamily == v1alpha1.Bottlerocket { m.Spec.Template.Spec.ImageLookupBaseOS = string(v1alpha1.Bottlerocket) } return m }
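As a usage sketch of the builders above: a SnowMachineConfig whose direct network interface references an IP pool is rendered into an AWSSnowMachineTemplate by first resolving the pool into a CAPASIPPools map, which is how ControlPlaneSpec wires them together. All names and field values below are illustrative.

package snow_test

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

func buildExampleMachineTemplate() *snowv1.AWSSnowMachineTemplate {
	mc := &v1alpha1.SnowMachineConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "md-0"},
		Spec: v1alpha1.SnowMachineConfigSpec{
			InstanceType: "sbe-c.large",
			SshKeyName:   "default",
			AMIID:        "ami-example",
			OSFamily:     v1alpha1.Ubuntu,
			Devices:      []string{"1.2.3.4"},
			Network: v1alpha1.SnowNetwork{
				DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
					{Index: 1, Primary: true, IPPoolRef: &v1alpha1.Ref{Kind: "SnowIPPool", Name: "pool-1"}},
				},
			},
		},
	}

	// The CAPAS pool referenced by the DNI; buildDNI copies its Kind and Name
	// into the template's IPPool object reference.
	pools := snow.CAPASIPPools{
		"pool-1": &snowv1.AWSSnowIPPool{
			TypeMeta:   metav1.TypeMeta{Kind: snow.SnowIPPoolKind},
			ObjectMeta: metav1.ObjectMeta{Name: "pool-1"},
		},
	}

	return snow.MachineTemplate("snow-test-md-0-1", mc, pools)
}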
334
eks-anywhere
aws
Go
package snow_test import ( "testing" etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/go-logr/logr" . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/providers/snow" snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) type apiBuilerTest struct { *WithT clusterSpec *cluster.Spec machineConfigs map[string]*v1alpha1.SnowMachineConfig logger logr.Logger } func newApiBuilerTest(t *testing.T) apiBuilerTest { return apiBuilerTest{ WithT: NewWithT(t), clusterSpec: givenClusterSpec(), machineConfigs: givenMachineConfigs(), logger: test.NewNullLogger(), } } func wantCAPICluster() *clusterv1.Cluster { return &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "Cluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test", Namespace: "eksa-system", Labels: map[string]string{ "cluster.x-k8s.io/cluster-name": "snow-test", "cluster.anywhere.eks.amazonaws.com/cluster-name": "snow-test", "cluster.anywhere.eks.amazonaws.com/cluster-namespace": "test-namespace", }, }, Spec: clusterv1.ClusterSpec{ ClusterNetwork: &clusterv1.ClusterNetwork{ Pods: &clusterv1.NetworkRanges{ CIDRBlocks: []string{ "10.1.0.0/16", }, }, Services: &clusterv1.NetworkRanges{ CIDRBlocks: []string{ "10.96.0.0/12", }, }, }, ControlPlaneRef: &v1.ObjectReference{ APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", Kind: "KubeadmControlPlane", Name: "snow-test", }, InfrastructureRef: &v1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "AWSSnowCluster", Name: "snow-test", }, }, } } func wantCAPIClusterUnstackedEtcd() *clusterv1.Cluster { cluster := wantCAPICluster() cluster.Spec.ManagedExternalEtcdRef = &v1.ObjectReference{ Kind: "EtcdadmCluster", APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1", Namespace: "eksa-system", Name: "snow-test-etcd", } return cluster } func TestCAPICluster(t *testing.T) { tt := newApiBuilerTest(t) snowCluster := snow.SnowCluster(tt.clusterSpec, wantSnowCredentialsSecret()) controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name], nil) kubeadmControlPlane, err := snow.KubeadmControlPlane(tt.logger, tt.clusterSpec, controlPlaneMachineTemplate) tt.Expect(err).To(Succeed()) got := snow.CAPICluster(tt.clusterSpec, snowCluster, kubeadmControlPlane, nil) tt.Expect(got).To(Equal(wantCAPICluster())) } func wantKubeadmControlPlane() *controlplanev1.KubeadmControlPlane { wantReplicas := int32(3) return &controlplanev1.KubeadmControlPlane{ TypeMeta: metav1.TypeMeta{ APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", Kind: "KubeadmControlPlane", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test", Namespace: "eksa-system", }, Spec: controlplanev1.KubeadmControlPlaneSpec{ MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: v1.ObjectReference{ APIVersion: 
"infrastructure.cluster.x-k8s.io/v1beta1", Kind: "AWSSnowMachineTemplate", Name: "snow-test-control-plane-1", }, }, KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ ImageRepository: "public.ecr.aws/eks-distro/kubernetes", DNS: bootstrapv1.DNS{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-distro/coredns", ImageTag: "v1.8.4-eks-1-21-9", }, }, Etcd: bootstrapv1.Etcd{ Local: &bootstrapv1.LocalEtcd{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-distro/etcd-io", ImageTag: "v3.4.16-eks-1-21-9", }, ExtraArgs: map[string]string{ "cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "listen-peer-urls": "https://0.0.0.0:2380", "listen-client-urls": "https://0.0.0.0:2379", }, }, }, APIServer: bootstrapv1.APIServer{ ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{}, ExtraVolumes: []bootstrapv1.HostPathMount{}, }, }, ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: tlsCipherSuitesArgs(), ExtraVolumes: []bootstrapv1.HostPathMount{}, }, Scheduler: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{}, ExtraVolumes: []bootstrapv1.HostPathMount{}, }, }, InitConfiguration: &bootstrapv1.InitConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ KubeletExtraArgs: map[string]string{ "provider-id": "aws-snow:////'{{ ds.meta_data.instance_id }}'", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, }, JoinConfiguration: &bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ KubeletExtraArgs: map[string]string{ "provider-id": "aws-snow:////'{{ ds.meta_data.instance_id }}'", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", }, }, }, PreKubeadmCommands: []string{ "/etc/eks/bootstrap.sh", }, PostKubeadmCommands: []string{}, Files: []bootstrapv1.File{ { Path: "/etc/kubernetes/manifests/kube-vip.yaml", Owner: "root:root", Content: test.KubeVipTemplate, }, }, }, Replicas: &wantReplicas, Version: "v1.21.5-eks-1-21-9", }, } } func wantKubeadmControlPlaneUnstackedEtcd() *controlplanev1.KubeadmControlPlane { kcp := wantKubeadmControlPlane() kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ External: &bootstrapv1.ExternalEtcd{ Endpoints: []string{}, CAFile: "/etc/kubernetes/pki/etcd/ca.crt", CertFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt", KeyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key", }, } return kcp } func wantRegistryMirrorCommands() []string { return []string{ "cat /etc/containerd/config_append.toml >> /etc/containerd/config.toml", "sudo systemctl daemon-reload", "sudo systemctl restart containerd", } } func TestKubeadmControlPlane(t *testing.T) { tt := newApiBuilerTest(t) controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name], nil) got, err := snow.KubeadmControlPlane(tt.logger, tt.clusterSpec, controlPlaneMachineTemplate) tt.Expect(err).To(Succeed()) want := wantKubeadmControlPlane() want.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"} tt.Expect(got).To(BeComparableTo(want)) } var registryMirrorTests = []struct { name string registryMirrorConfig *v1alpha1.RegistryMirrorConfiguration wantFiles []bootstrapv1.File wantRegistryConfig bootstrapv1.RegistryMirrorConfiguration }{ { name: "with namespace", registryMirrorConfig: 
&v1alpha1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4", Port: "443", OCINamespaces: []v1alpha1.OCINamespace{ { Registry: "public.ecr.aws", Namespace: "eks-anywhere", }, }, CACertContent: "xyz", }, wantFiles: []bootstrapv1.File{ { Path: "/etc/containerd/config_append.toml", Owner: "root:root", Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"] endpoint = ["https://1.2.3.4:443/v2/eks-anywhere"] [plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls] ca_file = "/etc/containerd/certs.d/1.2.3.4:443/ca.crt"`, }, { Path: "/etc/containerd/certs.d/1.2.3.4:443/ca.crt", Owner: "root:root", Content: "xyz", }, }, wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4:443/v2/eks-anywhere", CACert: "xyz", }, }, { name: "with ca cert", registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4", Port: "443", OCINamespaces: []v1alpha1.OCINamespace{ { Registry: "public.ecr.aws", Namespace: "eks-anywhere", }, { Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com", Namespace: "curated-packages", }, }, CACertContent: "xyz", }, wantFiles: []bootstrapv1.File{ { Path: "/etc/containerd/config_append.toml", Owner: "root:root", Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."783794618700.dkr.ecr.*.amazonaws.com"] endpoint = ["https://1.2.3.4:443/v2/curated-packages"] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"] endpoint = ["https://1.2.3.4:443/v2/eks-anywhere"] [plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls] ca_file = "/etc/containerd/certs.d/1.2.3.4:443/ca.crt"`, }, { Path: "/etc/containerd/certs.d/1.2.3.4:443/ca.crt", Owner: "root:root", Content: "xyz", }, }, wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4:443/v2/eks-anywhere", CACert: "xyz", }, }, { name: "with insecure skip", registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4", Port: "443", InsecureSkipVerify: true, }, wantFiles: []bootstrapv1.File{ { Path: "/etc/containerd/config_append.toml", Owner: "root:root", Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"] endpoint = ["https://1.2.3.4:443"] [plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls] insecure_skip_verify = true`, }, }, wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4:443", }, }, { name: "without ca cert", registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4", Port: "443", }, wantFiles: []bootstrapv1.File{ { Path: "/etc/containerd/config_append.toml", Owner: "root:root", Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"] endpoint = ["https://1.2.3.4:443"]`, }, }, wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4:443", }, }, { name: "with ca cert and insecure skip", registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4", Port: "443", CACertContent: "xyz", InsecureSkipVerify: true, }, wantFiles: []bootstrapv1.File{ { Path: "/etc/containerd/config_append.toml", Owner: "root:root", Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors] [plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"] endpoint = ["https://1.2.3.4:443"] 
[plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls] ca_file = "/etc/containerd/certs.d/1.2.3.4:443/ca.crt" insecure_skip_verify = true`, }, { Path: "/etc/containerd/certs.d/1.2.3.4:443/ca.crt", Owner: "root:root", Content: "xyz", }, }, wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{ Endpoint: "1.2.3.4:443", CACert: "xyz", }, }, } func TestKubeadmControlPlaneWithRegistryMirrorUbuntu(t *testing.T) { for _, tt := range registryMirrorTests { t.Run(tt.name, func(t *testing.T) { g := newApiBuilerTest(t) g.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = tt.registryMirrorConfig controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", g.machineConfigs[g.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name], nil) got, err := snow.KubeadmControlPlane(g.logger, g.clusterSpec, controlPlaneMachineTemplate) g.Expect(err).To(Succeed()) want := wantKubeadmControlPlane() want.Spec.KubeadmConfigSpec.Files = append(want.Spec.KubeadmConfigSpec.Files, tt.wantFiles...) want.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(want.Spec.KubeadmConfigSpec.PreKubeadmCommands, wantRegistryMirrorCommands()...) want.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"} g.Expect(got).To(BeComparableTo(want)) }) } } var pause = bootstrapv1.Pause{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-distro/kubernetes/pause", ImageTag: "0.0.1", }, } var bootstrap = bootstrapv1.BottlerocketBootstrap{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap", ImageTag: "0.0.1", }, } var admin = bootstrapv1.BottlerocketAdmin{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-anywhere/bottlerocket-admin", ImageTag: "0.0.1", }, } var control = bootstrapv1.BottlerocketControl{ ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/eks-anywhere/bottlerocket-control", ImageTag: "0.0.1", }, } var bootstrapCustom = []bootstrapv1.BottlerocketBootstrapContainer{ { Name: "bottlerocket-bootstrap-snow", ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: "public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap-snow", ImageTag: "v1-20-22-eks-a-v0.0.0-dev-build.4984", }, Mode: "always", }, } func TestKubeadmControlPlaneWithRegistryMirrorBottlerocket(t *testing.T) { for _, tt := range registryMirrorTests { t.Run(tt.name, func(t *testing.T) { g := newApiBuilerTest(t) g.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = tt.registryMirrorConfig g.clusterSpec.SnowMachineConfig("test-cp").Spec.OSFamily = v1alpha1.Bottlerocket controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", g.machineConfigs["test-cp"], nil) got, err := snow.KubeadmControlPlane(g.logger, g.clusterSpec, controlPlaneMachineTemplate) g.Expect(err).To(Succeed()) want := wantKubeadmControlPlane() want.Spec.KubeadmConfigSpec.Format = "bottlerocket" want.Spec.KubeadmConfigSpec.PreKubeadmCommands = []string{} want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketBootstrap = bootstrap want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketAdmin = admin want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketControl = control want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketCustomBootstrapContainers = bootstrapCustom want.Spec.KubeadmConfigSpec.ClusterConfiguration.Pause = pause want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketBootstrap = bootstrap 
want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketAdmin = admin want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketControl = control want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketCustomBootstrapContainers = bootstrapCustom want.Spec.KubeadmConfigSpec.JoinConfiguration.Pause = pause want.Spec.KubeadmConfigSpec.ClusterConfiguration.RegistryMirror = tt.wantRegistryConfig want.Spec.KubeadmConfigSpec.JoinConfiguration.RegistryMirror = tt.wantRegistryConfig want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes, bootstrapv1.HostPathMount{ HostPath: "/var/lib/kubeadm/controller-manager.conf", MountPath: "/etc/kubernetes/controller-manager.conf", Name: "kubeconfig", PathType: "File", ReadOnly: true, }, ) want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes, bootstrapv1.HostPathMount{ HostPath: "/var/lib/kubeadm/scheduler.conf", MountPath: "/etc/kubernetes/scheduler.conf", Name: "kubeconfig", PathType: "File", ReadOnly: true, }, ) want.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "/var/lib/kubeadm/pki" g.Expect(got).To(BeComparableTo(want)) }) } } func wantProxyConfigCommands() []string { return []string{ "sudo systemctl daemon-reload", "sudo systemctl restart containerd", } } var proxyTests = []struct { name string proxy *v1alpha1.ProxyConfiguration wantFiles []bootstrapv1.File wantProxyConfig bootstrapv1.ProxyConfiguration }{ { name: "with proxy, pods cidr, service cidr, cp endpoint", proxy: &v1alpha1.ProxyConfiguration{ HttpProxy: "1.2.3.4:8888", HttpsProxy: "1.2.3.4:8888", NoProxy: []string{ "1.2.3.4/0", "1.2.3.5/0", }, }, wantFiles: []bootstrapv1.File{ { Path: "/etc/systemd/system/containerd.service.d/http-proxy.conf", Owner: "root:root", Content: `[Service] Environment="HTTP_PROXY=1.2.3.4:8888" Environment="HTTPS_PROXY=1.2.3.4:8888" Environment="NO_PROXY=10.1.0.0/16,10.96.0.0/12,1.2.3.4/0,1.2.3.5/0,localhost,127.0.0.1,.svc,1.2.3.4"`, }, }, wantProxyConfig: bootstrapv1.ProxyConfiguration{ HTTPSProxy: "1.2.3.4:8888", NoProxy: []string{ "10.1.0.0/16", "10.96.0.0/12", "1.2.3.4/0", "1.2.3.5/0", "localhost", "127.0.0.1", ".svc", "1.2.3.4", }, }, }, } func TestKubeadmControlPlaneWithProxyConfigUbuntu(t *testing.T) { for _, tt := range proxyTests { t.Run(tt.name, func(t *testing.T) { g := newApiBuilerTest(t) g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", g.machineConfigs[g.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name], nil) got, err := snow.KubeadmControlPlane(g.logger, g.clusterSpec, controlPlaneMachineTemplate) g.Expect(err).To(Succeed()) want := wantKubeadmControlPlane() want.Spec.KubeadmConfigSpec.Files = append(want.Spec.KubeadmConfigSpec.Files, tt.wantFiles...) want.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(want.Spec.KubeadmConfigSpec.PreKubeadmCommands, wantProxyConfigCommands()...) 
want.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"} g.Expect(got).To(BeComparableTo(want)) }) } } func TestKubeadmControlPlaneWithProxyConfigBottlerocket(t *testing.T) { for _, tt := range proxyTests { t.Run(tt.name, func(t *testing.T) { g := newApiBuilerTest(t) g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy g.clusterSpec.SnowMachineConfig("test-cp").Spec.OSFamily = v1alpha1.Bottlerocket controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", g.machineConfigs["test-cp"], nil) got, err := snow.KubeadmControlPlane(g.logger, g.clusterSpec, controlPlaneMachineTemplate) g.Expect(err).To(Succeed()) want := wantKubeadmControlPlane() want.Spec.KubeadmConfigSpec.Format = "bottlerocket" want.Spec.KubeadmConfigSpec.PreKubeadmCommands = []string{} want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketBootstrap = bootstrap want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketAdmin = admin want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketControl = control want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketCustomBootstrapContainers = bootstrapCustom want.Spec.KubeadmConfigSpec.ClusterConfiguration.Pause = pause want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketBootstrap = bootstrap want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketAdmin = admin want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketControl = control want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketCustomBootstrapContainers = bootstrapCustom want.Spec.KubeadmConfigSpec.JoinConfiguration.Pause = pause want.Spec.KubeadmConfigSpec.ClusterConfiguration.Proxy = tt.wantProxyConfig want.Spec.KubeadmConfigSpec.JoinConfiguration.Proxy = tt.wantProxyConfig want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes, bootstrapv1.HostPathMount{ HostPath: "/var/lib/kubeadm/controller-manager.conf", MountPath: "/etc/kubernetes/controller-manager.conf", Name: "kubeconfig", PathType: "File", ReadOnly: true, }, ) want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes, bootstrapv1.HostPathMount{ HostPath: "/var/lib/kubeadm/scheduler.conf", MountPath: "/etc/kubernetes/scheduler.conf", Name: "kubeconfig", PathType: "File", ReadOnly: true, }, ) want.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "/var/lib/kubeadm/pki" g.Expect(got).To(Equal(want)) }) } } var bottlerocketAdditionalSettingsTests = []struct { name string settings *v1alpha1.HostOSConfiguration wantConfig *bootstrapv1.BottlerocketSettings }{ { name: "with kernel sysctl settings", settings: &v1alpha1.HostOSConfiguration{ BottlerocketConfiguration: &v1alpha1.BottlerocketConfiguration{ Kernel: &bootstrapv1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", }, }, }, }, wantConfig: &bootstrapv1.BottlerocketSettings{ Kernel: &bootstrapv1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", }, }, }, }, { name: "with boot kernel settings", settings: &v1alpha1.HostOSConfiguration{ BottlerocketConfiguration: &v1alpha1.BottlerocketConfiguration{ Boot: &bootstrapv1.BottlerocketBootSettings{ BootKernelParameters: map[string][]string{ "foo": { "abc", "def", }, }, }, }, }, wantConfig: &bootstrapv1.BottlerocketSettings{ Boot: &bootstrapv1.BottlerocketBootSettings{ 
BootKernelParameters: map[string][]string{ "foo": { "abc", "def", }, }, }, }, }, { name: "with both empty", settings: &v1alpha1.HostOSConfiguration{ BottlerocketConfiguration: &v1alpha1.BottlerocketConfiguration{ Boot: &bootstrapv1.BottlerocketBootSettings{}, Kernel: &bootstrapv1.BottlerocketKernelSettings{}, }, }, wantConfig: &bootstrapv1.BottlerocketSettings{ Boot: &bootstrapv1.BottlerocketBootSettings{}, Kernel: &bootstrapv1.BottlerocketKernelSettings{}, }, }, } func TestKubeadmControlPlaneWithBottlerocketAdditionalSettings(t *testing.T) { for _, tt := range bottlerocketAdditionalSettingsTests { t.Run(tt.name, func(t *testing.T) { g := newApiBuilerTest(t) g.clusterSpec.SnowMachineConfig("test-cp").Spec.HostOSConfiguration = tt.settings g.clusterSpec.SnowMachineConfig("test-cp").Spec.OSFamily = v1alpha1.Bottlerocket controlPlaneMachineTemplate := snow.MachineTemplate("snow-test-control-plane-1", g.machineConfigs["test-cp"], nil) got, err := snow.KubeadmControlPlane(g.logger, g.clusterSpec, controlPlaneMachineTemplate) g.Expect(err).To(Succeed()) want := wantKubeadmControlPlane() want.Spec.KubeadmConfigSpec.Format = "bottlerocket" want.Spec.KubeadmConfigSpec.PreKubeadmCommands = []string{} want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketBootstrap = bootstrap want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketAdmin = admin want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketControl = control want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketCustomBootstrapContainers = bootstrapCustom want.Spec.KubeadmConfigSpec.ClusterConfiguration.Pause = pause want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketBootstrap = bootstrap want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketAdmin = admin want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketControl = control want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketCustomBootstrapContainers = bootstrapCustom want.Spec.KubeadmConfigSpec.JoinConfiguration.Pause = pause want.Spec.KubeadmConfigSpec.ClusterConfiguration.Bottlerocket = tt.wantConfig want.Spec.KubeadmConfigSpec.JoinConfiguration.Bottlerocket = tt.wantConfig want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes, bootstrapv1.HostPathMount{ HostPath: "/var/lib/kubeadm/controller-manager.conf", MountPath: "/etc/kubernetes/controller-manager.conf", Name: "kubeconfig", PathType: "File", ReadOnly: true, }, ) want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes, bootstrapv1.HostPathMount{ HostPath: "/var/lib/kubeadm/scheduler.conf", MountPath: "/etc/kubernetes/scheduler.conf", Name: "kubeconfig", PathType: "File", ReadOnly: true, }, ) want.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "/var/lib/kubeadm/pki" g.Expect(got).To(Equal(want)) }) } } func wantKubeadmConfigTemplate() *bootstrapv1.KubeadmConfigTemplate { return &bootstrapv1.KubeadmConfigTemplate{ TypeMeta: metav1.TypeMeta{ APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", Kind: "KubeadmConfigTemplate", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test-md-0-1", Namespace: "eksa-system", }, Spec: bootstrapv1.KubeadmConfigTemplateSpec{ Template: bootstrapv1.KubeadmConfigTemplateResource{ Spec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{ ControllerManager: bootstrapv1.ControlPlaneComponent{ ExtraArgs: 
map[string]string{}, }, APIServer: bootstrapv1.APIServer{ ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{ ExtraArgs: map[string]string{}, }, }, }, JoinConfiguration: &bootstrapv1.JoinConfiguration{ NodeRegistration: bootstrapv1.NodeRegistrationOptions{ KubeletExtraArgs: map[string]string{ "provider-id": "aws-snow:////'{{ ds.meta_data.instance_id }}'", }, }, }, PreKubeadmCommands: []string{ "/etc/eks/bootstrap.sh", }, PostKubeadmCommands: []string{}, Files: []bootstrapv1.File{}, }, }, }, } } func TestKubeadmConfigTemplate(t *testing.T) { g := newApiBuilerTest(t) workerNodeGroupConfig := g.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0] g.clusterSpec.SnowMachineConfigs["test-wn"].Spec.ContainersVolume = &snowv1.Volume{Size: 8} got, err := snow.KubeadmConfigTemplate(g.logger, g.clusterSpec, workerNodeGroupConfig) g.Expect(err).To(Succeed()) want := wantKubeadmConfigTemplate() g.Expect(got).To(Equal(want)) } func wantMachineDeployment() *clusterv1.MachineDeployment { wantVersion := "v1.21.5-eks-1-21-9" wantReplicas := int32(3) return &clusterv1.MachineDeployment{ TypeMeta: metav1.TypeMeta{ APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachineDeployment", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test-md-0", Namespace: "eksa-system", Labels: map[string]string{ "cluster.x-k8s.io/cluster-name": "snow-test", "cluster.anywhere.eks.amazonaws.com/cluster-name": "snow-test", "cluster.anywhere.eks.amazonaws.com/cluster-namespace": "test-namespace", }, Annotations: map[string]string{}, }, Spec: clusterv1.MachineDeploymentSpec{ ClusterName: "snow-test", Selector: metav1.LabelSelector{ MatchLabels: map[string]string{}, }, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ Labels: map[string]string{ "cluster.x-k8s.io/cluster-name": "snow-test", }, }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ ConfigRef: &v1.ObjectReference{ APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", Kind: "KubeadmConfigTemplate", Name: "snow-test-md-0-1", }, }, ClusterName: "snow-test", InfrastructureRef: v1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "AWSSnowMachineTemplate", Name: "snow-test-md-0-1", }, Version: &wantVersion, }, }, Replicas: &wantReplicas, }, } } func wantSnowCluster() *snowv1.AWSSnowCluster { return &snowv1.AWSSnowCluster{ TypeMeta: metav1.TypeMeta{ Kind: "AWSSnowCluster", APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test", Namespace: "eksa-system", }, Spec: snowv1.AWSSnowClusterSpec{ Region: "snow", ControlPlaneEndpoint: clusterv1.APIEndpoint{ Host: "1.2.3.4", Port: 6443, }, IdentityRef: &snowv1.AWSSnowIdentityReference{ Name: "snow-test-snow-credentials", Kind: "Secret", }, }, } } func TestSnowCluster(t *testing.T) { tt := newApiBuilerTest(t) got := snow.SnowCluster(tt.clusterSpec, wantSnowCredentialsSecret()) tt.Expect(got).To(Equal(wantSnowCluster())) } func TestSnowCredentialsSecret(t *testing.T) { tt := newApiBuilerTest(t) got := snow.CAPASCredentialsSecret(tt.clusterSpec, []byte("creds"), []byte("certs")) want := wantSnowCredentialsSecret() tt.Expect(got).To(Equal(want)) } func wantSnowCredentialsSecret() *v1.Secret { return &v1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test-snow-credentials", Namespace: "eksa-system", Labels: map[string]string{ "clusterctl.cluster.x-k8s.io/move": "true", }, }, Data: map[string][]byte{ "credentials": []byte("creds"), "ca-bundle": []byte("certs"), 
}, Type: "Opaque", } } func wantEksaCredentialsSecret() *v1.Secret { return &v1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-snow-credentials", Namespace: "test-namespace", }, Data: map[string][]byte{ "credentials": []byte("creds"), "ca-bundle": []byte("certs"), }, Type: "Opaque", } } func wantSnowMachineTemplate() *snowv1.AWSSnowMachineTemplate { wantAMIID := "eks-d-v1-21-5-ubuntu-ami-02833ca9a8f29c2ea" wantSSHKey := "default" wantPhysicalNetworkConnector := "SFP_PLUS" osFamily := snowv1.Ubuntu return &snowv1.AWSSnowMachineTemplate{ TypeMeta: metav1.TypeMeta{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "AWSSnowMachineTemplate", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test-md-0-1", Namespace: "eksa-system", }, Spec: snowv1.AWSSnowMachineTemplateSpec{ Template: snowv1.AWSSnowMachineTemplateResource{ Spec: snowv1.AWSSnowMachineSpec{ IAMInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io", InstanceType: "sbe-c.xlarge", SSHKeyName: &wantSSHKey, AMI: snowv1.AWSResourceReference{ ID: &wantAMIID, }, CloudInit: snowv1.CloudInit{ InsecureSkipSecretsManager: true, }, PhysicalNetworkConnectorType: &wantPhysicalNetworkConnector, Devices: []string{ "1.2.3.4", "1.2.3.5", }, OSFamily: &osFamily, Network: snowv1.AWSSnowNetwork{ DirectNetworkInterfaces: []snowv1.AWSSnowDirectNetworkInterface{ { Index: 1, DHCP: true, Primary: true, }, }, }, }, }, }, } } func wantSnowIPPool() *snowv1.AWSSnowIPPool { return &snowv1.AWSSnowIPPool{ TypeMeta: metav1.TypeMeta{ APIVersion: clusterapi.InfrastructureAPIVersion(), Kind: snow.SnowIPPoolKind, }, ObjectMeta: metav1.ObjectMeta{ Name: "ip-pool-1", Namespace: constants.EksaSystemNamespace, }, Spec: snowv1.AWSSnowIPPoolSpec{ IPPools: []snowv1.IPPool{ { IPStart: ptr.String("start"), IPEnd: ptr.String("end"), Gateway: ptr.String("gateway"), Subnet: ptr.String("subnet"), }, }, }, } } func TestSnowMachineTemplate(t *testing.T) { tt := newApiBuilerTest(t) mc := tt.machineConfigs["test-cp"] mc.Spec.NonRootVolumes = []*snowv1.Volume{ { DeviceName: "/dev/sdc", Size: 10, }, } got := snow.MachineTemplate("snow-test-control-plane-1", mc, nil) want := wantSnowMachineTemplate() want.SetName("snow-test-control-plane-1") want.Spec.Template.Spec.InstanceType = "sbe-c.large" want.Spec.Template.Spec.NonRootVolumes = mc.Spec.NonRootVolumes tt.Expect(got).To(Equal(want)) } func TestSnowMachineTemplateWithNetwork(t *testing.T) { tt := newApiBuilerTest(t) network := snowv1.AWSSnowNetwork{ DirectNetworkInterfaces: []snowv1.AWSSnowDirectNetworkInterface{ { Index: 1, DHCP: false, IPPool: &v1.ObjectReference{ Kind: "AWSSnowIPPool", Name: "ip-pool", }, Primary: true, }, }, } tt.machineConfigs["test-cp"].Spec.Network = v1alpha1.SnowNetwork{ DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{ { Index: 1, DHCP: false, IPPoolRef: &v1alpha1.Ref{ Kind: "SnowIPPool", Name: "ip-pool", }, Primary: true, }, }, } capasPools := snow.CAPASIPPools{ "ip-pool": &snowv1.AWSSnowIPPool{ TypeMeta: metav1.TypeMeta{ Kind: "AWSSnowIPPool", }, ObjectMeta: metav1.ObjectMeta{ Name: "ip-pool", }, }, } got := snow.MachineTemplate("snow-test-control-plane-1", tt.machineConfigs["test-cp"], capasPools) want := wantSnowMachineTemplate() want.SetName("snow-test-control-plane-1") want.Spec.Template.Spec.InstanceType = "sbe-c.large" want.Spec.Template.Spec.Network = network tt.Expect(got).To(BeComparableTo(want)) } func tlsCipherSuitesArgs() map[string]string { return map[string]string{"tls-cipher-suites": 
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"} } func wantEtcdCluster() *etcdv1.EtcdadmCluster { replicas := int32(3) return &etcdv1.EtcdadmCluster{ TypeMeta: metav1.TypeMeta{ APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1", Kind: "EtcdadmCluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "snow-test-etcd", Namespace: "eksa-system", }, Spec: etcdv1.EtcdadmClusterSpec{ Replicas: &replicas, EtcdadmConfigSpec: etcdbootstrapv1.EtcdadmConfigSpec{ EtcdadmBuiltin: true, CipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", PreEtcdadmCommands: []string{}, }, InfrastructureTemplate: v1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", Kind: "AWSSnowMachineTemplate", Name: "test-etcd", }, }, } } func wantEtcdClusterUbuntu() *etcdv1.EtcdadmCluster { etcd := wantEtcdCluster() etcd.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("cloud-config") etcd.Spec.EtcdadmConfigSpec.CloudInitConfig = &etcdbootstrapv1.CloudInitConfig{ Version: "3.4.16", InstallDir: "/usr/bin", } etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands = []string{ "/etc/eks/bootstrap.sh", } return etcd } func wantEtcdClusterBottlerocket() *etcdv1.EtcdadmCluster { etcd := wantEtcdCluster() etcd.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("bottlerocket") etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{ EtcdImage: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1", BootstrapImage: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1", PauseImage: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1", AdminImage: "public.ecr.aws/eks-anywhere/bottlerocket-admin:0.0.1", ControlImage: "public.ecr.aws/eks-anywhere/bottlerocket-control:0.0.1", CustomBootstrapContainers: []etcdbootstrapv1.BottlerocketBootstrapContainer{ { Name: "bottlerocket-bootstrap-snow", Image: "public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap-snow:v1-20-22-eks-a-v0.0.0-dev-build.4984", Essential: false, Mode: "always", }, }, Kernel: &bootstrapv1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", }, }, Boot: &bootstrapv1.BottlerocketBootSettings{ BootKernelParameters: map[string][]string{ "foo": { "abc", "def", }, }, }, } return etcd } func TestEtcdadmClusterUbuntu(t *testing.T) { tt := newApiBuilerTest(t) tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{ Count: 3, MachineGroupRef: &v1alpha1.Ref{ Kind: v1alpha1.SnowMachineConfigKind, Name: "test-etcd", }, } tt.clusterSpec.SnowMachineConfigs["test-etcd"] = &v1alpha1.SnowMachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "test-etcd", Namespace: "test-namespace", }, Spec: v1alpha1.SnowMachineConfigSpec{ OSFamily: "ubuntu", }, } tt.machineConfigs["test-etcd"] = tt.clusterSpec.SnowMachineConfigs["test-etcd"] etcdMachineTemplates := snow.MachineTemplate("test-etcd", tt.machineConfigs["test-etcd"], nil) got := snow.EtcdadmCluster(tt.logger, tt.clusterSpec, etcdMachineTemplates) want := wantEtcdClusterUbuntu() tt.Expect(got).To(Equal(want)) } func TestEtcdadmClusterBottlerocket(t *testing.T) { tt := newApiBuilerTest(t) tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{ Count: 3, MachineGroupRef: &v1alpha1.Ref{ Kind: v1alpha1.SnowMachineConfigKind, Name: "test-etcd", }, } tt.clusterSpec.SnowMachineConfigs["test-etcd"] = &v1alpha1.SnowMachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "test-etcd", Namespace: "test-namespace", }, Spec: v1alpha1.SnowMachineConfigSpec{ OSFamily: "bottlerocket", HostOSConfiguration: &v1alpha1.HostOSConfiguration{ 
BottlerocketConfiguration: &v1alpha1.BottlerocketConfiguration{ Kernel: &bootstrapv1.BottlerocketKernelSettings{ SysctlSettings: map[string]string{ "foo": "bar", }, }, Boot: &bootstrapv1.BottlerocketBootSettings{ BootKernelParameters: map[string][]string{ "foo": { "abc", "def", }, }, }, }, }, }, } tt.machineConfigs["test-etcd"] = tt.clusterSpec.SnowMachineConfigs["test-etcd"] etcdMachineTemplates := snow.MachineTemplate("test-etcd", tt.machineConfigs["test-etcd"], nil) got := snow.EtcdadmCluster(tt.logger, tt.clusterSpec, etcdMachineTemplates) want := wantEtcdClusterBottlerocket() tt.Expect(got).To(Equal(want)) } func TestEtcdadmClusterUnsupportedOS(t *testing.T) { tt := newApiBuilerTest(t) tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{ Count: 3, MachineGroupRef: &v1alpha1.Ref{ Kind: v1alpha1.SnowMachineConfigKind, Name: "test-etcd", }, } tt.clusterSpec.SnowMachineConfigs["test-etcd"] = &v1alpha1.SnowMachineConfig{ ObjectMeta: metav1.ObjectMeta{ Name: "test-etcd", Namespace: "test-namespace", }, Spec: v1alpha1.SnowMachineConfigSpec{ OSFamily: "unsupported", }, } tt.machineConfigs["test-etcd"] = tt.clusterSpec.SnowMachineConfigs["test-etcd"] etcdMachineTemplates := snow.MachineTemplate("test-etcd", tt.machineConfigs["test-etcd"], nil) got := snow.EtcdadmCluster(tt.logger, tt.clusterSpec, etcdMachineTemplates) want := wantEtcdCluster() tt.Expect(got).To(Equal(want)) }
1194
eks-anywhere
aws
Go
package snow

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/aws"
)

// AwsClient is the subset of AWS API operations the snow provider needs
// to validate and provision Snowball devices.
type AwsClient interface {
	EC2ImageExists(ctx context.Context, imageID string) (bool, error)
	EC2KeyNameExists(ctx context.Context, keyName string) (bool, error)
	EC2ImportKeyPair(ctx context.Context, keyName string, keyMaterial []byte) error
	EC2InstanceTypes(ctx context.Context) ([]aws.EC2InstanceType, error)
	IsSnowballDeviceUnlocked(ctx context.Context) (bool, error)
	SnowballDeviceSoftwareVersion(ctx context.Context) (string, error)
}

// LocalIMDSClient contains methods that fetch metadata from the local imds.
type LocalIMDSClient interface {
	EC2InstanceIP(ctx context.Context) (string, error)
}

// AwsClientMap maps device profiles to their AWS clients.
type AwsClientMap map[string]AwsClient

// NewAwsClientMap converts generic aws.Clients into an AwsClientMap.
func NewAwsClientMap(awsClients aws.Clients) AwsClientMap {
	c := make(AwsClientMap, len(awsClients))
	for profile, client := range awsClients {
		c[profile] = client
	}

	return c
}
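A sketch of how these types are typically wired together, assuming device credentials and CA bundles are already configured in the environment (see the registry and its tests further below). The helper name is hypothetical.

package snow_test

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/aws"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

// checkDevices is a hypothetical helper: it builds per-device clients from the
// environment and verifies each Snowball device is unlocked.
func checkDevices(ctx context.Context) error {
	clients, err := aws.BuildClients(ctx)
	if err != nil {
		return err
	}

	for profile, client := range snow.NewAwsClientMap(clients) {
		unlocked, err := client.IsSnowballDeviceUnlocked(ctx)
		if err != nil {
			return fmt.Errorf("checking device %s: %v", profile, err)
		}
		if !unlocked {
			return fmt.Errorf("device %s is locked", profile)
		}
	}

	return nil
}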
32
eks-anywhere
aws
Go
package snow import ( etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const bottlerocketBootstrapImage = "bottlerocket-bootstrap-snow" func bottlerocketBootstrapSnow(image releasev1.Image) bootstrapv1.BottlerocketBootstrapContainer { return bootstrapv1.BottlerocketBootstrapContainer{ Name: bottlerocketBootstrapImage, ImageMeta: bootstrapv1.ImageMeta{ ImageRepository: image.Image(), ImageTag: image.Tag(), }, Mode: "always", } } func addBottlerocketBootstrapSnowInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, image releasev1.Image) { b := bottlerocketBootstrapSnow(image) kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketCustomBootstrapContainers = []bootstrapv1.BottlerocketBootstrapContainer{b} kcp.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketCustomBootstrapContainers = []bootstrapv1.BottlerocketBootstrapContainer{b} } func addBottlerocketBootstrapSnowInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, image releasev1.Image) { kct.Spec.Template.Spec.JoinConfiguration.BottlerocketCustomBootstrapContainers = []bootstrapv1.BottlerocketBootstrapContainer{bottlerocketBootstrapSnow(image)} } func addBottlerocketBootstrapSnowInEtcdCluster(etcd *etcdv1.EtcdadmCluster, image releasev1.Image) { etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig.CustomBootstrapContainers = []etcdbootstrapv1.BottlerocketBootstrapContainer{ { Name: bottlerocketBootstrapImage, Image: image.VersionedImage(), Mode: "always", }, } }
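For context, the helpers above split a release image URI into the repository and tag fields of a BottlerocketBootstrapContainer. A small sketch of that split, assuming releasev1.Image carries its full reference in the URI field and that Image() and Tag() return the repository and tag respectively, as their usage here suggests; the URI itself is a placeholder.

package snow_test

import (
	"fmt"

	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

func ExampleImageSplit() {
	// Placeholder image URI; any repository:tag pair works for illustration.
	img := releasev1.Image{URI: "public.ecr.aws/example/bottlerocket-bootstrap-snow:v1-0-0"}

	// Image() yields the repository and Tag() the tag, which is how the
	// unexported bottlerocketBootstrapSnow helper populates ImageMeta.
	fmt.Println(img.Image())
	fmt.Println(img.Tag())
}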
44
eks-anywhere
aws
Go
package snow

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/aws"
)

// ClientRegistry provides access to the per-device AWS clients.
type ClientRegistry interface {
	Get(ctx context.Context) (AwsClientMap, error)
}

// AwsClientRegistry is a ClientRegistry backed by clients built from the
// credentials and CA bundles configured in the environment.
type AwsClientRegistry struct {
	deviceClientMap AwsClientMap
}

// NewAwsClientRegistry creates an empty AwsClientRegistry; Build must be
// called to populate it.
func NewAwsClientRegistry() *AwsClientRegistry {
	return &AwsClientRegistry{}
}

// Build creates the device client map based on the filepaths specified.
// This method must be called before any Get operations.
func (b *AwsClientRegistry) Build(ctx context.Context) error {
	clients, err := aws.BuildClients(ctx)
	if err != nil {
		return err
	}
	b.deviceClientMap = NewAwsClientMap(clients)
	return nil
}

// Get returns the device client map, or an error if Build has not run yet.
func (b *AwsClientRegistry) Get(ctx context.Context) (AwsClientMap, error) {
	if b.deviceClientMap == nil {
		return nil, fmt.Errorf("aws clients for snow not initialized")
	}
	return b.deviceClientMap, nil
}
39
eks-anywhere
aws
Go
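A usage sketch (not from the repo) of the registry above: Build must run before Get and, as the tests in the next record suggest, it reads the credentials and CA-bundle file paths from the EKSA_AWS_CREDENTIALS_FILE and EKSA_AWS_CA_BUNDLES_FILE environment variables via aws.BuildClients.

package main

import (
	"context"
	"log"

	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

func main() {
	ctx := context.Background()

	registry := snow.NewAwsClientRegistry()
	// Build constructs one AWS client per configured snow device.
	if err := registry.Build(ctx); err != nil {
		log.Fatalf("building snow clients: %v", err)
	}

	// Get fails with "aws clients for snow not initialized" if Build was skipped.
	clients, err := registry.Get(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("built clients for %d devices", len(clients))
}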
package snow_test

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"

	"github.com/aws/eks-anywhere/pkg/aws"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

func TestGetSnowAwsClientMapSuccess(t *testing.T) {
	g := NewWithT(t)
	ctx := context.Background()
	clientBuilder := snow.NewAwsClientRegistry()
	t.Setenv(aws.EksaAwsCredentialsFileKey, credsFilePath)
	t.Setenv(aws.EksaAwsCABundlesFileKey, certsFilePath)
	err := clientBuilder.Build(ctx)
	g.Expect(err).To(Succeed())
	clientMap, err := clientBuilder.Get(ctx)
	g.Expect(err).To(Succeed())
	g.Expect(clientMap).NotTo(BeNil())
}

func TestBuildSnowAwsClientMapFailure(t *testing.T) {
	g := NewWithT(t)
	t.Setenv(credsFileEnvVar, "")
	ctx := context.Background()
	clientBuilder := snow.NewAwsClientRegistry()
	err := clientBuilder.Build(ctx)
	g.Expect(err).To(MatchError(ContainSubstring("fetching aws credentials from env")))
}

func TestGetSnowAwsClientMapFailure(t *testing.T) {
	g := NewWithT(t)
	ctx := context.Background()
	clientBuilder := snow.NewAwsClientRegistry()
	_, err := clientBuilder.Get(ctx)
	g.Expect(err).To(MatchError(ContainSubstring("aws clients for snow not initialized")))
}
46
eks-anywhere
aws
Go
package snow

import (
	"context"
	"fmt"

	"github.com/aws/etcdadm-controller/api/v1beta1"
	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/clusterapi"
	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

// BaseControlPlane represents a CAPI Snow control plane.
type BaseControlPlane = clusterapi.ControlPlane[*snowv1.AWSSnowCluster, *snowv1.AWSSnowMachineTemplate]

// ControlPlane holds the Snow specific objects for a CAPI snow control plane.
type ControlPlane struct {
	BaseControlPlane
	Secret       *corev1.Secret
	CAPASIPPools CAPASIPPools
}

// Objects returns the control plane objects associated with the snow cluster.
func (c ControlPlane) Objects() []kubernetes.Object {
	o := c.BaseControlPlane.Objects()
	o = append(o, c.Secret)
	for _, p := range c.CAPASIPPools {
		o = append(o, p)
	}

	return o
}

// ControlPlaneSpec builds a snow ControlPlane definition based on an eks-a cluster spec.
func ControlPlaneSpec(ctx context.Context, logger logr.Logger, client kubernetes.Client, clusterSpec *cluster.Spec) (*ControlPlane, error) {
	capasCredentialsSecret, err := capasCredentialsSecret(clusterSpec)
	if err != nil {
		return nil, err
	}

	snowCluster := SnowCluster(clusterSpec, capasCredentialsSecret)

	cpMachineConfig := clusterSpec.SnowMachineConfigs[clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]

	capasPools := CAPASIPPools{}
	capasPools.addPools(cpMachineConfig.Spec.Network.DirectNetworkInterfaces, clusterSpec.SnowIPPools)

	cpMachineTemplate := MachineTemplate(clusterapi.ControlPlaneMachineTemplateName(clusterSpec.Cluster), cpMachineConfig, capasPools)

	kubeadmControlPlane, err := KubeadmControlPlane(logger, clusterSpec, cpMachineTemplate)
	if err != nil {
		return nil, err
	}

	var etcdMachineTemplate *snowv1.AWSSnowMachineTemplate
	var etcdCluster *v1beta1.EtcdadmCluster

	if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
		etcdMachineConfig := clusterSpec.SnowMachineConfigs[clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name]
		capasPools.addPools(etcdMachineConfig.Spec.Network.DirectNetworkInterfaces, clusterSpec.SnowIPPools)
		etcdMachineTemplate = MachineTemplate(clusterapi.EtcdMachineTemplateName(clusterSpec.Cluster), etcdMachineConfig, capasPools)
		etcdCluster = EtcdadmCluster(logger, clusterSpec, etcdMachineTemplate)
	}

	capiCluster := CAPICluster(clusterSpec, snowCluster, kubeadmControlPlane, etcdCluster)

	cp := &ControlPlane{
		BaseControlPlane: BaseControlPlane{
			Cluster:                     capiCluster,
			ProviderCluster:             snowCluster,
			KubeadmControlPlane:         kubeadmControlPlane,
			ControlPlaneMachineTemplate: cpMachineTemplate,
			EtcdCluster:                 etcdCluster,
			EtcdMachineTemplate:         etcdMachineTemplate,
		},
		Secret:       capasCredentialsSecret,
		CAPASIPPools: capasPools,
	}

	if err := cp.UpdateImmutableObjectNames(ctx, client, getMachineTemplate, MachineTemplateDeepDerivative); err != nil {
		return nil, errors.Wrap(err, "updating snow immutable object names")
	}

	return cp, nil
}

// capasCredentialsSecret generates the credentials secret(s) used for provisioning a snow cluster.
//   - eks-a credentials secret: user-managed secret referred to from the snowdatacenterconfig identityRef
//   - snow credentials secret: created, updated, and deleted by eks-a in the eksa-system namespace. This secret is
//     fully managed by eks-a; users should treat it as read-only.
func capasCredentialsSecret(clusterSpec *cluster.Spec) (*corev1.Secret, error) {
	if clusterSpec.SnowCredentialsSecret == nil {
		return nil, errors.New("snowCredentialsSecret in clusterSpec shall not be nil")
	}

	// We reconcile the snow credentials secret to keep it in sync with the user-managed eks-a credentials secret.
	// Note that for CLI upgrades, the eks-a credentials secret update is handled in a separate step, under
	// provider.UpdateSecrets, which runs before the actual cluster upgrade.
	// For the controller, the user is responsible for keeping the eks-a credentials secret created and up to date.
	credsB64, ok := clusterSpec.SnowCredentialsSecret.Data["credentials"]
	if !ok {
		return nil, fmt.Errorf("unable to retrieve credentials from secret [%s]", clusterSpec.SnowCredentialsSecret.GetName())
	}
	certsB64, ok := clusterSpec.SnowCredentialsSecret.Data["ca-bundle"]
	if !ok {
		return nil, fmt.Errorf("unable to retrieve ca-bundle from secret [%s]", clusterSpec.SnowCredentialsSecret.GetName())
	}

	return CAPASCredentialsSecret(clusterSpec, credsB64, certsB64), nil
}
114
eks-anywhere
aws
Go
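capasCredentialsSecret in the file above requires the user-managed eks-a secret to carry base64-encoded payloads under the "credentials" and "ca-bundle" keys. A sketch of a conforming secret follows; the name, namespace, and payloads are hypothetical.

package snowexample

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleEksaCredentialsSecret sketches the user-managed secret referenced by
// the SnowDatacenterConfig identityRef; all values are placeholders.
var exampleEksaCredentialsSecret = &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "my-snow-credentials", // hypothetical
		Namespace: "default",
	},
	Data: map[string][]byte{
		"credentials": []byte("PGJhc2U2NCBjcmVkZW50aWFscz4="), // required key
		"ca-bundle":   []byte("PGJhc2U2NCBjYS1idW5kbGU+"),     // required key
	},
}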
package snow

import (
	"context"
	"fmt"

	"github.com/google/uuid"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/aws"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/filewriter"
	"github.com/aws/eks-anywhere/pkg/logger"
	"github.com/aws/eks-anywhere/pkg/providers/common"
)

type Defaulters struct {
	clientRegistry ClientRegistry
	writer         filewriter.FileWriter
	keyGenerator   SshKeyGenerator
	uuid           uuid.UUID
}

type SshKeyGenerator interface {
	GenerateSSHAuthKey(filewriter.FileWriter) (string, error)
}

type DefaultersOpt func(defaulters *Defaulters)

func NewDefaulters(clientRegistry ClientRegistry, writer filewriter.FileWriter, opts ...DefaultersOpt) *Defaulters {
	defaulters := &Defaulters{
		clientRegistry: clientRegistry,
		writer:         writer,
		keyGenerator:   common.SshAuthKeyGenerator{},
		// In the future, if we need a cluster-wide uuid that is shared, we should move this call to the dependency factory for reuse.
		uuid: uuid.New(),
	}

	for _, opt := range opts {
		opt(defaulters)
	}

	return defaulters
}

func WithKeyGenerator(generator SshKeyGenerator) DefaultersOpt {
	return func(defaulters *Defaulters) {
		defaulters.keyGenerator = generator
	}
}

// WithUUID sets a uuid generated outside of the constructor.
func WithUUID(uuid uuid.UUID) DefaultersOpt {
	return func(defaulters *Defaulters) {
		defaulters.uuid = uuid
	}
}

// GenerateDefaultSSHKeys generates an ssh key for each machine config that doesn't already have one.
func (d *Defaulters) GenerateDefaultSSHKeys(ctx context.Context, machineConfigs map[string]*v1alpha1.SnowMachineConfig, clusterName string) error {
	md := NewMachineConfigDefaulters(d)
	for _, m := range machineConfigs {
		if m.Spec.SshKeyName == "" {
			if err := md.SetupDefaultSSHKey(ctx, m, clusterName); err != nil {
				return err
			}
		}
	}
	return nil
}

type MachineConfigDefaulters struct {
	sshKey     string
	defaulters *Defaulters
}

func NewMachineConfigDefaulters(d *Defaulters) *MachineConfigDefaulters {
	return &MachineConfigDefaulters{
		defaulters: d,
	}
}

// SetupDefaultSSHKey creates and imports a default ssh key to the snow devices listed in the snow machine config.
// If one does not exist yet, an ssh auth key is generated locally first. Then we loop through the devices in the
// machine config and import the key to any device that does not have it. In the end the default ssh key name is
// assigned to the snow machine config.
func (md *MachineConfigDefaulters) SetupDefaultSSHKey(ctx context.Context, m *v1alpha1.SnowMachineConfig, clusterName string) error {
	defaultSSHKeyName := md.defaultSSHKeyName(clusterName)
	clientMap, err := md.defaulters.clientRegistry.Get(ctx)
	if err != nil {
		return err
	}

	if md.sshKey == "" {
		logger.V(1).Info("SnowMachineConfig SshKey is empty. Creating default key pair", "default key name", defaultSSHKeyName)
		md.sshKey, err = md.defaulters.keyGenerator.GenerateSSHAuthKey(md.defaulters.writer)
		if err != nil {
			return err
		}
	}

	for _, ip := range m.Spec.Devices {
		client, ok := clientMap[ip]
		if !ok {
			return fmt.Errorf("credentials not found for device [%s]", ip)
		}
		keyExists, err := client.EC2KeyNameExists(ctx, defaultSSHKeyName)
		if err != nil {
			return fmt.Errorf("describing key pair on snow device [%s]: %v", ip, err)
		}
		if keyExists {
			continue
		}
		if err = client.EC2ImportKeyPair(ctx, defaultSSHKeyName, []byte(md.sshKey)); err != nil {
			return fmt.Errorf("importing key pair on snow device [deviceIP=%s]: %v", ip, err)
		}
	}

	m.Spec.SshKeyName = defaultSSHKeyName

	return nil
}

func (md *MachineConfigDefaulters) defaultSSHKeyName(clusterName string) string {
	return fmt.Sprintf("%s-%s-%s", defaultAwsSshKeyName, clusterName, md.defaulters.uuid.String())
}

func SetupEksaCredentialsSecret(c *cluster.Config) error {
	creds, err := aws.EncodeFileFromEnv(eksaSnowCredentialsFileKey)
	if err != nil {
		return fmt.Errorf("setting up snow credentials: %v", err)
	}
	certs, err := aws.EncodeFileFromEnv(eksaSnowCABundlesFileKey)
	if err != nil {
		return fmt.Errorf("setting up snow certificates: %v", err)
	}
	c.SnowCredentialsSecret = EksaCredentialsSecret(c.SnowDatacenter, []byte(creds), []byte(certs))
	return nil
}
146
eks-anywhere
aws
Go
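A wiring sketch (not from the repo) for the defaulters above: a built client registry and a file writer are assumed to exist, and WithUUID pins the uuid so the generated key name is deterministic. The package name, function name, and cluster name are hypothetical.

package snowexample

import (
	"context"

	"github.com/google/uuid"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/filewriter"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

// generateDefaultKeys shows the intended call pattern; registry and writer
// construction are elided and assumed to be set up elsewhere.
func generateDefaultKeys(ctx context.Context, registry snow.ClientRegistry, writer filewriter.FileWriter, machineConfigs map[string]*v1alpha1.SnowMachineConfig) error {
	defaulters := snow.NewDefaulters(registry, writer, snow.WithUUID(uuid.New()))
	// Only machine configs with an empty Spec.SshKeyName get a default key.
	return defaulters.GenerateDefaultSSHKeys(ctx, machineConfigs, "my-cluster") // cluster name hypothetical
}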
package snow_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

const (
	sshKey = "ssh-rsa ABCDE"
)

func TestGenerateDefaultSSHKeysExists(t *testing.T) {
	g := newConfigManagerTest(t)
	err := g.defaulters.GenerateDefaultSSHKeys(g.ctx, map[string]*v1alpha1.SnowMachineConfig{g.machineConfig.Name: g.machineConfig}, g.clusterName)
	g.Expect(err).To(Succeed())
}

func TestGenerateDefaultSSHKeysError(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(false, errors.New("test error"))
	err := g.defaulters.GenerateDefaultSSHKeys(g.ctx, map[string]*v1alpha1.SnowMachineConfig{g.machineConfig.Name: g.machineConfig}, g.clusterName)
	g.Expect(err).NotTo(Succeed())
}

func TestGenerateDefaultSSHKeysGenerated(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	secondMachineConfig := g.machineConfig.DeepCopy()
	secondMachineConfig.Name = g.machineConfig.Name + "-2"
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(false, nil).Times(2)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(true, nil).Times(2)
	g.aws.EXPECT().EC2ImportKeyPair(g.ctx, g.defaultKeyName, []byte(sshKey)).Return(nil).Times(2)
	err := g.defaulters.GenerateDefaultSSHKeys(g.ctx, map[string]*v1alpha1.SnowMachineConfig{
		g.machineConfig.Name:     g.machineConfig,
		secondMachineConfig.Name: secondMachineConfig,
	}, g.clusterName)
	g.Expect(err).To(Succeed())
}

func TestSetDefaultSSHKey(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(false, nil).Times(2)
	g.aws.EXPECT().EC2ImportKeyPair(g.ctx, g.defaultKeyName, []byte(sshKey)).Return(nil).Times(2)
	err := g.machineConfigDefaulters.SetupDefaultSSHKey(g.ctx, g.machineConfig, g.clusterName)
	g.Expect(err).To(Succeed())
}

func TestSetDefaultSSHKeyExistsOnAllDevices(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(true, nil).Times(2)
	err := g.machineConfigDefaulters.SetupDefaultSSHKey(g.ctx, g.machineConfig, g.clusterName)
	g.Expect(g.machineConfig.Spec.SshKeyName).To(Equal(g.defaultKeyName))
	g.Expect(err).To(Succeed())
}

func TestSetDefaultSSHKeyExistsOnPartialDevices(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(true, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(false, nil)
	g.aws.EXPECT().EC2ImportKeyPair(g.ctx, g.defaultKeyName, []byte(sshKey)).Return(nil)
	err := g.machineConfigDefaulters.SetupDefaultSSHKey(g.ctx, g.machineConfig, g.clusterName)
	g.Expect(err).To(Succeed())
}

func TestSetDefaultSSHKeyImportKeyError(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.defaultKeyName).Return(false, nil)
	g.aws.EXPECT().EC2ImportKeyPair(g.ctx, g.defaultKeyName, []byte(sshKey)).Return(errors.New("error"))
	err := g.machineConfigDefaulters.SetupDefaultSSHKey(g.ctx, g.machineConfig, g.clusterName)
	g.Expect(err).NotTo(Succeed())
}

func TestSetDefaultSSHKeyClientMapError(t *testing.T) {
	g := newConfigManagerTestClientMapError(t)
	g.machineConfig.Spec.SshKeyName = ""
	err := g.machineConfigDefaulters.SetupDefaultSSHKey(g.ctx, g.machineConfig, g.clusterName)
	g.Expect(err).NotTo(Succeed())
}

func TestSetDefaultSSHKeyDeviceNotFoundInClientMap(t *testing.T) {
	g := newConfigManagerTest(t)
	g.machineConfig.Spec.SshKeyName = ""
	g.machineConfig.Spec.Devices = []string{"device-not-exist"}
	g.keyGenerator.EXPECT().GenerateSSHAuthKey(g.writer).Return(sshKey, nil)
	err := g.machineConfigDefaulters.SetupDefaultSSHKey(g.ctx, g.machineConfig, g.clusterName)
	g.Expect(err).To(MatchError(ContainSubstring("credentials not found for device")))
}
103
eks-anywhere
aws
Go
package snow

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/cluster"
)

type ConfigManager struct {
	validator  *Validator
	defaulters *Defaulters
}

// NewConfigManager returns a new snow config manager.
func NewConfigManager(defaulters *Defaulters, validators *Validator) *ConfigManager {
	return &ConfigManager{
		validator:  validators,
		defaulters: defaulters,
	}
}

func (cm *ConfigManager) SetDefaultsAndValidate(ctx context.Context, config *cluster.Config) error {
	configManager := cluster.NewConfigManager()
	if err := configManager.Register(cm.snowEntry(ctx)); err != nil {
		return err
	}

	if err := configManager.SetDefaults(config); err != nil {
		return err
	}

	if err := configManager.Validate(config); err != nil {
		return err
	}

	return nil
}

func (cm *ConfigManager) snowEntry(ctx context.Context) *cluster.ConfigManagerEntry {
	return &cluster.ConfigManagerEntry{
		Defaulters: []cluster.Defaulter{
			func(c *cluster.Config) error {
				return cm.defaulters.GenerateDefaultSSHKeys(ctx, c.SnowMachineConfigs, c.Cluster.Name)
			},
			func(c *cluster.Config) error {
				return SetupEksaCredentialsSecret(c)
			},
		},
		Validations: []cluster.Validation{
			func(c *cluster.Config) error {
				for _, m := range c.SnowMachineConfigs {
					if err := cm.validator.ValidateEC2ImageExistsOnDevice(ctx, m); err != nil {
						return err
					}
				}
				return nil
			},
			func(c *cluster.Config) error {
				for _, m := range c.SnowMachineConfigs {
					if err := cm.validator.ValidateEC2SshKeyNameExists(ctx, m); err != nil {
						return err
					}
				}
				return nil
			},
			func(c *cluster.Config) error {
				for _, m := range c.SnowMachineConfigs {
					if err := cm.validator.ValidateDeviceIsUnlocked(ctx, m); err != nil {
						return err
					}
				}
				return nil
			},
			func(c *cluster.Config) error {
				for _, m := range c.SnowMachineConfigs {
					if err := cm.validator.ValidateInstanceType(ctx, m); err != nil {
						return err
					}
				}
				return nil
			},
			func(c *cluster.Config) error {
				for _, m := range c.SnowMachineConfigs {
					if err := cm.validator.ValidateDeviceSoftware(ctx, m); err != nil {
						return err
					}
				}
				return nil
			},
			func(c *cluster.Config) error {
				return cm.validator.ValidateControlPlaneIP(ctx, c.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host)
			},
		},
	}
}
96
eks-anywhere
aws
Go
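A minimal sketch (not from the repo) of wiring the pieces from the records above together; the Validator is defined elsewhere in the package, so its construction is elided and assumed to exist. The package and function names are hypothetical.

package snowexample

import (
	"context"

	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

// setDefaultsAndValidate runs the snow defaulters and validations registered
// by snowEntry against an eks-a cluster config.
func setDefaultsAndValidate(ctx context.Context, defaulters *snow.Defaulters, validator *snow.Validator, config *cluster.Config) error {
	cm := snow.NewConfigManager(defaulters, validator)
	return cm.SetDefaultsAndValidate(ctx, config)
}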
package snow

import (
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

func addStackedEtcdExtraArgsInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, externalEtcdConfig *v1alpha1.ExternalEtcdConfiguration) {
	if externalEtcdConfig != nil {
		return
	}
	stackedEtcdExtraArgs := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local.ExtraArgs
	stackedEtcdExtraArgs["listen-peer-urls"] = "https://0.0.0.0:2380"
	stackedEtcdExtraArgs["listen-client-urls"] = "https://0.0.0.0:2379"
}
18
eks-anywhere
aws
Go
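When no external etcd is configured, the helper above mutates the KubeadmControlPlane in place so the stacked etcd members listen on all interfaces. A sketch of the resulting arguments, with values taken from the function itself:

package snowexample

// stackedEtcdListenArgs reproduces the extra args set by
// addStackedEtcdExtraArgsInKubeadmControlPlane for stacked etcd: 2380 is the
// peer port and 2379 the client port.
var stackedEtcdListenArgs = map[string]string{
	"listen-peer-urls":   "https://0.0.0.0:2380",
	"listen-client-urls": "https://0.0.0.0:2379",
}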
package snow

import (
	"context"

	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/constants"
	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

func oldWorkerMachineTemplate(ctx context.Context, kubeClient kubernetes.Client, md *clusterv1.MachineDeployment) (*snowv1.AWSSnowMachineTemplate, error) {
	if md == nil {
		return nil, nil
	}

	mt := &snowv1.AWSSnowMachineTemplate{}
	err := kubeClient.Get(ctx, md.Spec.Template.Spec.InfrastructureRef.Name, constants.EksaSystemNamespace, mt)
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	return mt, nil
}

func getMachineTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*snowv1.AWSSnowMachineTemplate, error) {
	m := &snowv1.AWSSnowMachineTemplate{}
	if err := client.Get(ctx, name, namespace, m); err != nil {
		return nil, errors.Wrap(err, "fetching snowMachineTemplate")
	}
	return m, nil
}
39
eks-anywhere
aws
Go
package snow

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/cluster"
)

// CredentialsSecretName returns the name of the snow credentials secret for the cluster.
func CredentialsSecretName(clusterSpec *cluster.Spec) string {
	return fmt.Sprintf("%s-snow-credentials", clusterSpec.Cluster.GetName())
}
12
eks-anywhere
aws
Go
package snow

import (
	"context"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/equality"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

// ControlPlaneObjects generates the control plane objects for snow provider from clusterSpec.
func ControlPlaneObjects(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec, kubeClient kubernetes.Client) ([]kubernetes.Object, error) {
	cp, err := ControlPlaneSpec(ctx, log, kubeClient, clusterSpec)
	if err != nil {
		return nil, err
	}

	return cp.Objects(), nil
}

// WorkersObjects generates all the objects that compose a Snow specific CAPI spec for the worker nodes of an eks-a cluster.
func WorkersObjects(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec, kubeClient kubernetes.Client) ([]kubernetes.Object, error) {
	w, err := WorkersSpec(ctx, log, clusterSpec, kubeClient)
	if err != nil {
		return nil, err
	}

	return w.Objects(), nil
}

// MachineTemplateDeepDerivative compares two awssnowmachinetemplates to determine if their spec fields are equal.
// DeepDerivative is used so that unset fields in the new object are not compared. However, DeepDerivative treats
// a subset slice in the new object as equal to the original slice, i.e. DeepDerivative([]int{1}, []int{1, 2}) returns true.
// Custom logic is added to handle this case, since removing a device from the devices list must trigger a machine
// rollout and recreation; otherwise the snow cluster gets into a state where the machines on the removed device can't be deleted.
func MachineTemplateDeepDerivative(new, old *snowv1.AWSSnowMachineTemplate) bool {
	if len(new.Spec.Template.Spec.Devices) != len(old.Spec.Template.Spec.Devices) {
		return false
	}
	return equality.Semantic.DeepDerivative(new.Spec, old.Spec)
}
45
eks-anywhere
aws
Go
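The subset-slice behavior called out in the comment above can be seen directly with apimachinery's semantic equality helpers. This standalone sketch (not from the repo, device IPs hypothetical) shows why MachineTemplateDeepDerivative compares device counts before deferring to DeepDerivative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/equality"
)

func main() {
	// A shorter "new" slice is treated as derivative of the original...
	fmt.Println(equality.Semantic.DeepDerivative([]string{"1.2.3.4"}, []string{"1.2.3.4", "1.2.3.5"})) // true
	// ...but a longer one is not.
	fmt.Println(equality.Semantic.DeepDerivative([]string{"1.2.3.4", "1.2.3.5"}, []string{"1.2.3.4"})) // false

	// Hence the explicit length guard: removing a device must not look like "equal".
	newDevices, oldDevices := []string{"1.2.3.4"}, []string{"1.2.3.4", "1.2.3.5"}
	fmt.Println(len(newDevices) == len(oldDevices) && equality.Semantic.DeepDerivative(newDevices, oldDevices)) // false
}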
package snow_test

import (
	"context"
	"errors"
	"testing"

	"github.com/aws/etcdadm-controller/api/v1beta1"
	. "github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

func TestControlPlaneObjects(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *controlplanev1.KubeadmControlPlane) error {
			obj.Spec.MachineTemplate.InfrastructureRef.Name = "test-cp-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"test-cp-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mt.DeepCopyInto(obj)
			obj.SetName("test-cp-1")
			obj.Spec.Template.Spec.InstanceType = "updated-instance-type"
			return nil
		})

	wantMachineTemplateName := "test-cp-2"
	mt.SetName(wantMachineTemplateName)
	mt.Spec.Template.Spec.InstanceType = "sbe-c.large"
	kcp := wantKubeadmControlPlane()
	kcp.Spec.MachineTemplate.InfrastructureRef.Name = wantMachineTemplateName
	kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"}

	got, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(BeComparableTo([]kubernetes.Object{wantCAPICluster(), kcp, wantSnowCluster(), mt, wantSnowCredentialsSecret()}))
}

func TestControlPlaneObjectsWithIPPools(t *testing.T) {
	g := newSnowTest(t)
	g.clusterSpec.SnowMachineConfig("test-cp").Spec.Network = anywherev1.SnowNetwork{
		DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
			{
				Index: 1,
				IPPoolRef: &anywherev1.Ref{
					Kind: anywherev1.SnowIPPoolKind,
					Name: "ip-pool-1",
				},
				Primary: true,
			},
		},
	}
	mt := wantSnowMachineTemplate()
	mt.Spec.Template.Spec.Network = snowv1.AWSSnowNetwork{
		DirectNetworkInterfaces: []snowv1.AWSSnowDirectNetworkInterface{
			{
				Index: 1,
				IPPool: &v1.ObjectReference{
					Kind: snow.SnowIPPoolKind,
					Name: "ip-pool-1",
				},
				Primary: true,
			},
		},
	}
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *controlplanev1.KubeadmControlPlane) error {
			obj.Spec.MachineTemplate.InfrastructureRef.Name = "test-cp-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"test-cp-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mt.DeepCopyInto(obj)
			obj.SetName("test-cp-1")
			obj.Spec.Template.Spec.InstanceType = "updated-instance-type"
			return nil
		})

	wantMachineTemplateName := "test-cp-2"
	mt.SetName(wantMachineTemplateName)
	mt.Spec.Template.Spec.InstanceType = "sbe-c.large"
	kcp := wantKubeadmControlPlane()
	kcp.Spec.MachineTemplate.InfrastructureRef.Name = wantMachineTemplateName
	kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"}

	got, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(BeComparableTo([]kubernetes.Object{wantCAPICluster(), kcp, wantSnowCluster(), mt, wantSnowCredentialsSecret(), wantSnowIPPool()}))
}

func TestControlPlaneObjectsUnstackedEtcd(t *testing.T) {
	g := newSnowTest(t)
	g.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &anywherev1.ExternalEtcdConfiguration{
		Count: 3,
		MachineGroupRef: &anywherev1.Ref{
			Kind: "SnowMachineConfig",
			Name: "test-etcd",
		},
	}
	g.clusterSpec.SnowMachineConfigs["test-etcd"] = &anywherev1.SnowMachineConfig{
		TypeMeta: metav1.TypeMeta{
			Kind: "SnowMachineConfig",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-etcd",
			Namespace: "test-namespace",
		},
		Spec: anywherev1.SnowMachineConfigSpec{
			AMIID:                    "eks-d-v1-21-5-ubuntu-ami-02833ca9a8f29c2ea",
			InstanceType:             "sbe-c.xlarge",
			SshKeyName:               "default",
			PhysicalNetworkConnector: "SFP_PLUS",
			Devices: []string{
				"1.2.3.4",
				"1.2.3.5",
			},
			OSFamily: anywherev1.Ubuntu,
			Network: anywherev1.SnowNetwork{
				DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
					{
						Index:   1,
						DHCP:    true,
						Primary: true,
					},
				},
			},
		},
	}
	mtCp := wantSnowMachineTemplate()
	mtEtcd := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *controlplanev1.KubeadmControlPlane) error {
			obj.Spec.MachineTemplate.InfrastructureRef.Name = "test-cp-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"test-cp-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mtCp.DeepCopyInto(obj)
			obj.SetName("test-cp-1")
			obj.Spec.Template.Spec.InstanceType = "updated-instance-type"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-etcd",
			constants.EksaSystemNamespace,
			&v1beta1.EtcdadmCluster{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *v1beta1.EtcdadmCluster) error {
			obj.Spec.InfrastructureTemplate.Name = "test-etcd-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"test-etcd-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mtCp.DeepCopyInto(obj)
			obj.SetName("test-etcd-1")
			obj.Spec.Template.Spec.InstanceType = "updated-instance-type"
			return nil
		})

	mtCpName := "test-cp-2"
	mtCp.SetName(mtCpName)
	mtCp.Spec.Template.Spec.InstanceType = "sbe-c.large"
	kcp := wantKubeadmControlPlaneUnstackedEtcd()
	kcp.Spec.MachineTemplate.InfrastructureRef.Name = mtCpName
	kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"}

	mtEtcdName := "test-etcd-2"
	mtEtcd.SetName(mtEtcdName)
	etcdCluster := wantEtcdClusterUbuntu()
	etcdCluster.Spec.InfrastructureTemplate.Name = mtEtcdName

	got, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(BeComparableTo([]kubernetes.Object{wantCAPIClusterUnstackedEtcd(), kcp, wantSnowCluster(), mtCp, etcdCluster, mtEtcd, wantSnowCredentialsSecret()}))
}

func TestControlPlaneObjectsCredentialsNil(t *testing.T) {
	g := newSnowTest(t)
	g.clusterSpec.SnowCredentialsSecret = nil
	_, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(MatchError(ContainSubstring("snowCredentialsSecret in clusterSpec shall not be nil")))
}

func TestControlPlaneObjectsSecretMissCredentialsKey(t *testing.T) {
	g := newSnowTest(t)
	g.clusterSpec.SnowCredentialsSecret.Data = map[string][]byte{
		"ca-bundle": []byte("eksa-certs"),
	}
	_, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(MatchError(ContainSubstring("unable to retrieve credentials from secret")))
}

func TestControlPlaneObjectsSecretMissCertificatesKey(t *testing.T) {
	g := newSnowTest(t)
	g.clusterSpec.SnowCredentialsSecret.Data = map[string][]byte{
		"credentials": []byte("eksa-creds"),
	}
	_, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(MatchError(ContainSubstring("unable to retrieve ca-bundle from secret")))
}

func TestControlPlaneObjectsOldControlPlaneNotExists(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))

	mt.SetName("snow-test-control-plane-1")
	mt.Spec.Template.Spec.InstanceType = "sbe-c.large"
	kcp := wantKubeadmControlPlane()
	kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"}

	got, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(Equal([]kubernetes.Object{wantCAPICluster(), kcp, wantSnowCluster(), mt, wantSnowCredentialsSecret()}))
}

func TestControlPlaneObjectsOldMachineTemplateNotExists(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *controlplanev1.KubeadmControlPlane) error {
			obj.Spec.MachineTemplate.InfrastructureRef.Name = "snow-test-control-plane-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-control-plane-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))

	mt.SetName("snow-test-control-plane-1")
	mt.Spec.Template.Spec.InstanceType = "sbe-c.large"
	kcp := wantKubeadmControlPlane()
	kcp.Spec.KubeadmConfigSpec.JoinConfiguration.NodeRegistration.IgnorePreflightErrors = []string{"DirAvailable--etc-kubernetes-manifests"}

	got, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(Equal([]kubernetes.Object{wantCAPICluster(), kcp, wantSnowCluster(), mt, wantSnowCredentialsSecret()}))
}

func TestControlPlaneObjectsGetOldControlPlaneError(t *testing.T) {
	g := newSnowTest(t)
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		Return(errors.New("get cp error"))

	_, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).NotTo(Succeed())
}

func TestControlPlaneObjectsGetOldMachineTemplateError(t *testing.T) {
	g := newSnowTest(t)
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test",
			constants.EksaSystemNamespace,
			&controlplanev1.KubeadmControlPlane{},
		).
		Return(nil)
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		Return(errors.New("get mt error"))

	_, err := snow.ControlPlaneObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).NotTo(Succeed())
}

func TestWorkersObjects(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error {
			wantKubeadmConfigTemplate().DeepCopyInto(obj)
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mt.DeepCopyInto(obj)
			obj.SetName("snow-test-md-0-1")
			obj.Spec.Template.Spec.InstanceType = "updated-instance-type"
			return nil
		})

	wantMachineTemplateName := "snow-test-md-0-2"
	mt.SetName(wantMachineTemplateName)
	md := wantMachineDeployment()
	md.Spec.Template.Spec.InfrastructureRef.Name = wantMachineTemplateName

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(ConsistOf([]kubernetes.Object{md, wantKubeadmConfigTemplate(), mt}))
}

func TestWorkersObjectsWithIPPools(t *testing.T) {
	g := newSnowTest(t)
	g.clusterSpec.SnowMachineConfig("test-wn").Spec.Network = anywherev1.SnowNetwork{
		DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
			{
				Index: 1,
				IPPoolRef: &anywherev1.Ref{
					Kind: anywherev1.SnowIPPoolKind,
					Name: "ip-pool-1",
				},
				Primary: true,
			},
		},
	}
	mt := wantSnowMachineTemplate()
	mt.Spec.Template.Spec.Network = snowv1.AWSSnowNetwork{
		DirectNetworkInterfaces: []snowv1.AWSSnowDirectNetworkInterface{
			{
				Index: 1,
				IPPool: &v1.ObjectReference{
					Kind: snow.SnowIPPoolKind,
					Name: "ip-pool-1",
				},
				Primary: true,
			},
		},
	}
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error {
			wantKubeadmConfigTemplate().DeepCopyInto(obj)
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mt.DeepCopyInto(obj)
			obj.SetName("snow-test-md-0-1")
			obj.Spec.Template.Spec.InstanceType = "updated-instance-type"
			return nil
		})

	wantMachineTemplateName := "snow-test-md-0-2"
	mt.SetName(wantMachineTemplateName)
	md := wantMachineDeployment()
	md.Spec.Template.Spec.InfrastructureRef.Name = wantMachineTemplateName

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(ConsistOf([]kubernetes.Object{md, wantKubeadmConfigTemplate(), mt, wantSnowIPPool()}))
}

func TestWorkersObjectsOldMachineDeploymentNotExists(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(ConsistOf([]kubernetes.Object{wantMachineDeployment(), wantKubeadmConfigTemplate(), mt}))
}

func TestWorkersObjectsOldKubeadmConfigTemplateNotExists(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(ConsistOf([]kubernetes.Object{wantMachineDeployment(), wantKubeadmConfigTemplate(), mt}))
}

func TestWorkersObjectsOldMachineTemplateNotExists(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error {
			wantKubeadmConfigTemplate().DeepCopyInto(obj)
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).To(Succeed())
	g.Expect(got).To(ConsistOf([]kubernetes.Object{wantMachineDeployment(), wantKubeadmConfigTemplate(), mt}))
}

func TestWorkersObjectsTaintsUpdated(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error {
			wantKubeadmConfigTemplate().DeepCopyInto(obj)
			obj.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints = []v1.Taint{
				{
					Key:    "key1",
					Value:  "val1",
					Effect: v1.TaintEffectNoExecute,
				},
			}
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mt.DeepCopyInto(obj)
			return nil
		})

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	md := wantMachineDeployment()
	md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-2"
	kct := wantKubeadmConfigTemplate()
	kct.SetName("snow-test-md-0-2")
	g.Expect(err).To(Succeed())
	g.Expect(got).To(BeComparableTo([]kubernetes.Object{kct, md, mt}))
}

func TestWorkersObjectsLabelsUpdated(t *testing.T) {
	g := newSnowTest(t)
	mt := wantSnowMachineTemplate()
	g.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Labels = map[string]string{
		"label1": "val1",
		"label2": "val2",
	}
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error {
			wantKubeadmConfigTemplate().DeepCopyInto(obj)
			obj.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{
				"provider-id": "aws-snow:////'{{ ds.meta_data.instance_id }}'",
				"node-labels": "label1=val2,label2=val1",
			}
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error {
			mt.DeepCopyInto(obj)
			return nil
		})

	got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	md := wantMachineDeployment()
	md.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-2"
	md.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-2"
	kct := wantKubeadmConfigTemplate()
	kct.SetName("snow-test-md-0-2")
	kct.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{
		"provider-id": "aws-snow:////'{{ ds.meta_data.instance_id }}'",
		"node-labels": "label1=val1,label2=val2",
	}
	mt.SetName("snow-test-md-0-2")
	g.Expect(err).To(Succeed())
	g.Expect(got).To(ContainElement(kct))
}

func TestWorkersObjectsGetMachineDeploymentError(t *testing.T) {
	g := newSnowTest(t)
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		Return(errors.New("get md error"))

	_, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).NotTo(Succeed())
}

func TestWorkersObjectsGetKubeadmConfigTemplateError(t *testing.T) {
	g := newSnowTest(t)
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&bootstrapv1.KubeadmConfigTemplate{},
		).
		Return(errors.New("get kct error"))
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		Return(nil)

	_, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).NotTo(Succeed())
}

func TestWorkersObjectsGetMachineTemplateError(t *testing.T) {
	g := newSnowTest(t)
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0",
			constants.EksaSystemNamespace,
			&clusterv1.MachineDeployment{},
		).
		DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
			wantMachineDeployment().DeepCopyInto(obj)
			obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1"
			obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1"
			return nil
		})
	g.kubeconfigClient.EXPECT().
		Get(
			g.ctx,
			"snow-test-md-0-1",
			constants.EksaSystemNamespace,
			&snowv1.AWSSnowMachineTemplate{},
		).
		Return(errors.New("get mt error"))

	_, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
	g.Expect(err).NotTo(Succeed())
}

func TestWorkersObjectsWithRegistryMirror(t *testing.T) {
	for _, tt := range registryMirrorTests {
		t.Run(tt.name, func(t *testing.T) {
			g := newSnowTest(t)
			g.kubeconfigClient.EXPECT().
				Get(
					g.ctx,
					"snow-test-md-0",
					constants.EksaSystemNamespace,
					&clusterv1.MachineDeployment{},
				).
				Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
			g.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = tt.registryMirrorConfig
			kct := wantKubeadmConfigTemplate()
			kct.Spec.Template.Spec.Files = tt.wantFiles
			kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, wantRegistryMirrorCommands()...)

			got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
			g.Expect(err).To(Succeed())
			g.Expect(got).To(ConsistOf([]kubernetes.Object{wantMachineDeployment(), kct, wantSnowMachineTemplate()}))
		})
	}
}

func TestWorkersObjectsWithProxy(t *testing.T) {
	for _, tt := range proxyTests {
		t.Run(tt.name, func(t *testing.T) {
			g := newSnowTest(t)
			g.kubeconfigClient.EXPECT().
				Get(
					g.ctx,
					"snow-test-md-0",
					constants.EksaSystemNamespace,
					&clusterv1.MachineDeployment{},
				).
				Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
			g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy
			kct := wantKubeadmConfigTemplate()
			kct.Spec.Template.Spec.Files = tt.wantFiles
			kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, wantProxyConfigCommands()...)

			got, err := snow.WorkersObjects(g.ctx, g.logger, g.clusterSpec, g.kubeconfigClient)
			g.Expect(err).To(Succeed())
			g.Expect(got).To(ConsistOf([]kubernetes.Object{wantMachineDeployment(), kct, wantSnowMachineTemplate()}))
		})
	}
}
828
eks-anywhere
aws
Go
package snow

import (
	"context"
	"fmt"
	"time"

	"github.com/go-logr/logr"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/bootstrapper"
	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/logger"
	"github.com/aws/eks-anywhere/pkg/providers"
	providerValidator "github.com/aws/eks-anywhere/pkg/providers/validator"
	"github.com/aws/eks-anywhere/pkg/retrier"
	"github.com/aws/eks-anywhere/pkg/templater"
	"github.com/aws/eks-anywhere/pkg/types"
	releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

const (
	eksaSnowCredentialsFileKey = "EKSA_AWS_CREDENTIALS_FILE"
	eksaSnowCABundlesFileKey   = "EKSA_AWS_CA_BUNDLES_FILE"
	snowCredentialsKey         = "AWS_B64ENCODED_CREDENTIALS"
	snowCertsKey               = "AWS_B64ENCODED_CA_BUNDLES"
	maxRetries                 = 30
	backOffPeriod              = 5 * time.Second
)

var (
	snowDatacenterResourceType = fmt.Sprintf("snowdatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
	snowMachineResourceType    = fmt.Sprintf("snowmachineconfigs.%s", v1alpha1.GroupVersion.Group)
)

type SnowProvider struct {
	kubeUnAuthClient KubeUnAuthClient
	retrier          *retrier.Retrier
	configManager    *ConfigManager
	ipValidator      *providerValidator.IPValidator
	skipIpCheck      bool
	log              logr.Logger
}

type KubeUnAuthClient interface {
	KubeconfigClient(kubeconfig string) kubernetes.Client
	Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error
}

func NewProvider(kubeUnAuthClient KubeUnAuthClient, configManager *ConfigManager, skipIpCheck bool) *SnowProvider {
	retrier := retrier.NewWithMaxRetries(maxRetries, backOffPeriod)
	return &SnowProvider{
		kubeUnAuthClient: kubeUnAuthClient,
		retrier:          retrier,
		configManager:    configManager,
		ipValidator:      providerValidator.NewIPValidator(),
		skipIpCheck:      skipIpCheck,
		log:              logger.Get(),
	}
}

func (p *SnowProvider) Name() string {
	return constants.SnowProviderName
}

func (p *SnowProvider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error {
	if err := p.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	if err := p.configManager.SetDefaultsAndValidate(ctx, clusterSpec.Config); err != nil {
		return fmt.Errorf("setting defaults and validate snow config: %v", err)
	}
	if !p.skipIpCheck {
		if err := p.ipValidator.ValidateControlPlaneIPUniqueness(clusterSpec.Cluster); err != nil {
			return err
		}
	} else {
		logger.Info("Skipping check for whether control plane ip is in use")
	}
	return nil
}

func (p *SnowProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, _ *cluster.Spec) error {
	if err := p.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	if err := p.configManager.SetDefaultsAndValidate(ctx, clusterSpec.Config); err != nil {
		return fmt.Errorf("setting defaults and validate snow config: %v", err)
	}
	return nil
}

func (p *SnowProvider) SetupAndValidateDeleteCluster(ctx context.Context, _ *types.Cluster, clusterSpec *cluster.Spec) error {
	if err := p.validateUpgradeRolloutStrategy(clusterSpec); err != nil {
		return fmt.Errorf("failed setup and validations: %v", err)
	}
	if err := SetupEksaCredentialsSecret(clusterSpec.Config); err != nil {
		return fmt.Errorf("setting up credentials: %v", err)
	}
	return nil
}

func (p *SnowProvider) UpdateSecrets(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	if err := p.kubeUnAuthClient.Apply(ctx, cluster.KubeconfigFile, clusterSpec.SnowCredentialsSecret); err != nil {
		return fmt.Errorf("applying eks-a snow credentials secret in cluster: %v", err)
	}
	return nil
}

// CAPIObjects generates the control plane and worker nodes objects for snow provider from clusterSpec.
func CAPIObjects(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec, kubeClient kubernetes.Client) (controlPlaneSpec, workersSpec []byte, err error) {
	controlPlaneObjs, err := ControlPlaneObjects(ctx, log, clusterSpec, kubeClient)
	if err != nil {
		return nil, nil, err
	}

	controlPlaneSpec, err = templater.ObjectsToYaml(kubernetesToRuntimeObjects(controlPlaneObjs)...)
	if err != nil {
		return nil, nil, err
	}

	workersObjs, err := WorkersObjects(ctx, log, clusterSpec, kubeClient)
	if err != nil {
		return nil, nil, err
	}

	workersSpec, err = templater.ObjectsToYaml(kubernetesToRuntimeObjects(workersObjs)...)
	if err != nil {
		return nil, nil, err
	}

	return controlPlaneSpec, workersSpec, nil
}

func kubernetesToRuntimeObjects(objs []kubernetes.Object) []runtime.Object {
	runtimeObjs := make([]runtime.Object, 0, len(objs))
	for _, o := range objs {
		runtimeObjs = append(runtimeObjs, o)
	}

	return runtimeObjs
}

func (p *SnowProvider) generateCAPISpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
	kubeconfigClient := p.kubeUnAuthClient.KubeconfigClient(cluster.KubeconfigFile)
	return CAPIObjects(ctx, p.log, clusterSpec, kubeconfigClient)
}

func (p *SnowProvider) GenerateCAPISpecForCreate(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
	return p.generateCAPISpec(ctx, cluster, clusterSpec)
}

func (p *SnowProvider) GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, _ *types.Cluster, _ *cluster.Spec, clusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error) {
	return p.generateCAPISpec(ctx, bootstrapCluster, clusterSpec)
}

func (p *SnowProvider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	return nil
}

func (p *SnowProvider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
	return nil
}

func (p *SnowProvider) PostBootstrapDeleteForUpgrade(ctx context.Context) error {
	return nil
}

func (p *SnowProvider) PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
	return nil
}

func (p *SnowProvider) PostWorkloadInit(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	return nil
}

func (p *SnowProvider) BootstrapClusterOpts(_ *cluster.Spec) ([]bootstrapper.BootstrapClusterOption, error) {
	return nil, nil
}

func (p *SnowProvider) UpdateKubeConfig(content *[]byte, clusterName string) error {
	return nil
}

func (p *SnowProvider) Version(clusterSpec *cluster.Spec) string {
	return clusterSpec.VersionsBundle.Snow.Version
}

func (p *SnowProvider) EnvMap(clusterSpec *cluster.Spec) (map[string]string, error) {
	envMap := make(map[string]string)
	envMap[snowCredentialsKey] = string(clusterSpec.SnowCredentialsSecret.Data[v1alpha1.SnowCredentialsKey])
	envMap[snowCertsKey] = string(clusterSpec.SnowCredentialsSecret.Data[v1alpha1.SnowCertificatesKey])

	envMap["SNOW_CONTROLLER_IMAGE"] = clusterSpec.VersionsBundle.Snow.Manager.VersionedImage()

	return envMap, nil
}

func (p *SnowProvider) GetDeployments() map[string][]string {
	return map[string][]string{
		constants.CapasSystemNamespace: {"capas-controller-manager"},
	}
}

func (p *SnowProvider) GetInfrastructureBundle(clusterSpec *cluster.Spec) *types.InfrastructureBundle {
	bundle := clusterSpec.VersionsBundle
	folderName := fmt.Sprintf("infrastructure-snow/%s/", bundle.Snow.Version)
	infraBundle := types.InfrastructureBundle{
		FolderName: folderName,
		Manifests: []releasev1alpha1.Manifest{
			bundle.Snow.Components,
			bundle.Snow.Metadata,
		},
	}
	return &infraBundle
}

func (p *SnowProvider) DatacenterConfig(clusterSpec *cluster.Spec) providers.DatacenterConfig {
	return clusterSpec.SnowDatacenter
}

func (p *SnowProvider) DatacenterResourceType() string {
	return snowDatacenterResourceType
}

func (p *SnowProvider) MachineResourceType() string {
	return snowMachineResourceType
}

func (p *SnowProvider) MachineConfigs(clusterSpec *cluster.Spec) []providers.MachineConfig {
	configs := make([]providers.MachineConfig, 0, len(clusterSpec.SnowMachineConfigs))
	for _, mc := range clusterSpec.SnowMachineConfigs {
		configs = append(configs, mc)
	}
	return configs
}

func (p *SnowProvider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
	return nil
}

func (p *SnowProvider) ChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ComponentChangeDiff {
	if currentSpec.VersionsBundle.Snow.Version == newSpec.VersionsBundle.Snow.Version {
		return nil
	}

	return &types.ComponentChangeDiff{
		ComponentName: constants.SnowProviderName,
		NewVersion:    newSpec.VersionsBundle.Snow.Version,
		OldVersion:    currentSpec.VersionsBundle.Snow.Version,
	}
}

func (p *SnowProvider) RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpec *cluster.Spec, clusterSpec *cluster.Spec, workloadCluster *types.Cluster, managementCluster *types.Cluster) error {
	return nil
}

func bundleImagesEqual(new, old releasev1alpha1.SnowBundle) bool {
	return new.Manager.ImageDigest == old.Manager.ImageDigest && new.KubeVip.ImageDigest == old.KubeVip.ImageDigest
}

func (p *SnowProvider) machineConfigsChanged(ctx context.Context, cluster *types.Cluster, spec *cluster.Spec) (bool, error) {
	client := p.kubeUnAuthClient.KubeconfigClient(cluster.KubeconfigFile)

	for _, new := range spec.SnowMachineConfigs {
		old := &v1alpha1.SnowMachineConfig{}
		err := client.Get(ctx, new.Name, namespaceOrDefault(new), old)
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		if err != nil {
			return false, err
		}
		if len(new.Spec.Devices) != len(old.Spec.Devices) || !equality.Semantic.DeepDerivative(new.Spec, old.Spec) {
			return true, nil
		}
	}

	return false, nil
}

func (p *SnowProvider) datacenterChanged(ctx context.Context, cluster *types.Cluster, spec *cluster.Spec) (bool, error) {
	client := p.kubeUnAuthClient.KubeconfigClient(cluster.KubeconfigFile)
	new := spec.SnowDatacenter
	old := &v1alpha1.SnowDatacenterConfig{}
	err := client.Get(ctx, new.Name, namespaceOrDefault(new), old)
	if apierrors.IsNotFound(err) {
		return true, nil
	}
	if err != nil {
		return false, err
	}

	return !equality.Semantic.DeepDerivative(new.Spec, old.Spec), nil
}

// namespaceOrDefault returns the object namespace, or default if it's empty.
func namespaceOrDefault(obj client.Object) string {
	ns := obj.GetNamespace()
	if ns == "" {
		ns = "default"
	}

	return ns
}

func (p *SnowProvider) validateUpgradeRolloutStrategy(clusterSpec *cluster.Spec) error {
	if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
		return fmt.Errorf("Upgrade rollout strategy customization is not supported for snow provider")
	}
	for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
		if workerNodeGroupConfiguration.UpgradeRolloutStrategy != nil {
			return fmt.Errorf("Upgrade rollout strategy customization is not supported for snow provider")
		}
	}
	return nil
}

// UpgradeNeeded compares the new snow version bundle and objects with the existing ones in the cluster and decides whether
// to trigger a cluster upgrade or not.
// TODO: revert the change once cluster.BuildSpec is used in cluster_manager to replace the deprecated cluster.BuildSpecForCluster
func (p *SnowProvider) UpgradeNeeded(ctx context.Context, newSpec, oldSpec *cluster.Spec, cluster *types.Cluster) (bool, error) {
	if !bundleImagesEqual(newSpec.VersionsBundle.Snow, oldSpec.VersionsBundle.Snow) {
		return true, nil
	}

	datacenterChanged, err := p.datacenterChanged(ctx, cluster, newSpec)
	if err != nil {
		return false, err
	}
	if datacenterChanged {
		return true, nil
	}

	return p.machineConfigsChanged(ctx, cluster, newSpec)
}

func (p *SnowProvider) DeleteResources(ctx context.Context, clusterSpec *cluster.Spec) error {
	client := p.kubeUnAuthClient.KubeconfigClient(clusterSpec.ManagementCluster.KubeconfigFile)

	for _, mc := range clusterSpec.SnowMachineConfigs {
		mc.Namespace = namespaceOrDefault(mc)
		if err := client.Delete(ctx, mc); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
	}

	clusterSpec.SnowDatacenter.Namespace = namespaceOrDefault(clusterSpec.SnowDatacenter)
	if err := client.Delete(ctx, clusterSpec.SnowDatacenter); err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("deleting snow datacenter: %v", err)
	}

	return nil
}

func (p *SnowProvider) PostClusterDeleteValidate(_ context.Context, _ *types.Cluster) error {
	// No validations
	return nil
}

func (p *SnowProvider) PostMoveManagementToBootstrap(_ context.Context, _ *types.Cluster) error {
	// NOOP
	return nil
}

func (p *SnowProvider) InstallCustomProviderComponents(ctx context.Context, kubeconfigFile string) error {
	return nil
}

// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *SnowProvider) PreCoreComponentsUpgrade(
	ctx context.Context,
	cluster *types.Cluster,
	clusterSpec *cluster.Spec,
) error {
	return nil
}
386
eks-anywhere
aws
Go
package snow_test import ( "context" "errors" "os" "testing" "github.com/go-logr/logr" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/aws" kubemock "github.com/aws/eks-anywhere/pkg/clients/kubernetes/mocks" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/providers/snow" snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1" "github.com/aws/eks-anywhere/pkg/providers/snow/mocks" "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/utils/ptr" releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const ( expectedSnowProviderName = "snow" credsFileEnvVar = "EKSA_AWS_CREDENTIALS_FILE" credsFilePath = "testdata/credentials" certsFileEnvVar = "EKSA_AWS_CA_BUNDLES_FILE" certsFilePath = "testdata/certificates" ) type snowTest struct { *WithT ctx context.Context ctrl *gomock.Controller kubeUnAuthClient *mocks.MockKubeUnAuthClient kubeconfigClient *kubemock.MockClient aws *mocks.MockAwsClient imds *mocks.MockLocalIMDSClient provider *snow.SnowProvider cluster *types.Cluster clusterSpec *cluster.Spec logger logr.Logger } func newSnowTest(t *testing.T) snowTest { ctrl := gomock.NewController(t) ctx := context.Background() mockKubeUnAuthClient := mocks.NewMockKubeUnAuthClient(ctrl) mockKubeconfigClient := kubemock.NewMockClient(ctrl) mockaws := mocks.NewMockAwsClient(ctrl) mockimds := mocks.NewMockLocalIMDSClient(ctrl) cluster := &types.Cluster{ Name: "cluster", } provider := newProvider(ctx, t, mockKubeUnAuthClient, mockaws, mockimds, ctrl) return snowTest{ WithT: NewWithT(t), ctx: ctx, ctrl: ctrl, kubeUnAuthClient: mockKubeUnAuthClient, kubeconfigClient: mockKubeconfigClient, aws: mockaws, imds: mockimds, provider: provider, cluster: cluster, clusterSpec: givenClusterSpec(), logger: test.NewNullLogger(), } } func givenClusterSpec() *cluster.Spec { return test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster = givenClusterConfig() s.SnowDatacenter = givenDatacenterConfig() s.SnowCredentialsSecret = wantEksaCredentialsSecret() s.SnowMachineConfigs = givenMachineConfigs() s.SnowIPPools = givenIPPools() s.VersionsBundle = givenVersionsBundle() s.ManagementCluster = givenManagementCluster() }) } func givenClusterSpecWithCPUpgradeStrategy() *cluster.Spec { return test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster = givenClusterConfigWithCPUpgradeStrategy() s.SnowDatacenter = givenDatacenterConfig() s.SnowCredentialsSecret = wantEksaCredentialsSecret() s.SnowMachineConfigs = givenMachineConfigs() s.VersionsBundle = givenVersionsBundle() s.ManagementCluster = givenManagementCluster() }) } func givenClusterSpecWithMDUpgradeStrategy() *cluster.Spec { return test.NewClusterSpec(func(s *cluster.Spec) { s.Cluster = givenClusterConfigWithMDUpgradeStrategy() s.SnowDatacenter = givenDatacenterConfig() s.SnowCredentialsSecret = wantEksaCredentialsSecret() s.SnowMachineConfigs = givenMachineConfigs() s.VersionsBundle = givenVersionsBundle() s.ManagementCluster = givenManagementCluster() }) } func givenVersionsBundle() 
*cluster.VersionsBundle { return &cluster.VersionsBundle{ KubeDistro: &cluster.KubeDistro{ Kubernetes: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/kubernetes", Tag: "v1.21.5-eks-1-21-9", }, CoreDNS: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/coredns", Tag: "v1.8.4-eks-1-21-9", }, Etcd: cluster.VersionedRepository{ Repository: "public.ecr.aws/eks-distro/etcd-io", Tag: "v3.4.16-eks-1-21-9", }, EtcdImage: releasev1alpha1.Image{ URI: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1", }, Pause: releasev1alpha1.Image{ URI: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1", }, EtcdVersion: "3.4.16", }, VersionsBundle: &releasev1alpha1.VersionsBundle{ KubeVersion: "1.21", Snow: releasev1alpha1.SnowBundle{ Version: "v1.0.2", KubeVip: releasev1alpha1.Image{ Name: "kube-vip", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.1433", ImageDigest: "sha256:cf324971db7696810effd5c6c95e34b2c115893e1fbcaeb8877355dc74768ef1", Description: "Container image for kube-vip image", Arch: []string{"amd64"}, }, Manager: releasev1alpha1.Image{ Name: "cluster-api-snow-controller", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/aws/cluster-api-provider-aws-snow/manager:v0.1.4-eks-a-v0.0.0-dev-build.2216", ImageDigest: "sha256:59da9c726c4816c29d119e77956c6391e2dff451daf36aeb60e5d6425eb88018", Description: "Container image for cluster-api-snow-controller image", Arch: []string{"amd64"}, }, BottlerocketBootstrapSnow: releasev1alpha1.Image{ Name: "bottlerocket-bootstrap-snow", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap-snow:v1-20-22-eks-a-v0.0.0-dev-build.4984", ImageDigest: "sha256:59da9c726c4816c29d119e77956c6391e2dff451daf36aeb60e5d6425eb88018", Description: "Container image for bottlerocket-bootstrap-snow image", Arch: []string{"amd64"}, }, }, BottleRocketHostContainers: releasev1alpha1.BottlerocketHostContainersBundle{ Admin: releasev1alpha1.Image{ URI: "public.ecr.aws/eks-anywhere/bottlerocket-admin:0.0.1", }, Control: releasev1alpha1.Image{ URI: "public.ecr.aws/eks-anywhere/bottlerocket-control:0.0.1", }, KubeadmBootstrap: releasev1alpha1.Image{ URI: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1", }, }, }, } } func givenManagementCluster() *types.Cluster { return &types.Cluster{ Name: "test-snow", KubeconfigFile: "management.kubeconfig", } } func givenClusterConfigWithCPUpgradeStrategy() *v1alpha1.Cluster { return &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "snow-test", Namespace: "test-namespace", }, Spec: v1alpha1.ClusterSpec{ ClusterNetwork: v1alpha1.ClusterNetwork{ CNI: v1alpha1.Cilium, Pods: v1alpha1.Pods{ CidrBlocks: []string{ "10.1.0.0/16", }, }, Services: v1alpha1.Services{ CidrBlocks: []string{ "10.96.0.0/12", }, }, }, ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{ Count: 3, Endpoint: &v1alpha1.Endpoint{ Host: "1.2.3.4", }, MachineGroupRef: &v1alpha1.Ref{ Kind: "SnowMachineConfig", Name: "test-cp", }, UpgradeRolloutStrategy: &v1alpha1.ControlPlaneUpgradeRolloutStrategy{ Type: "RollingUpdate", RollingUpdate: v1alpha1.ControlPlaneRollingUpdateParams{ MaxSurge: 1, }, }, }, KubernetesVersion: "1.21", WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{ { Name: "md-0", Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{ Kind: "SnowMachineConfig", Name: "test-wn", }, }, }, DatacenterRef: v1alpha1.Ref{ Kind: "SnowDatacenterConfig", Name: "test", }, }, } } func givenClusterConfigWithMDUpgradeStrategy() *v1alpha1.Cluster { return &v1alpha1.Cluster{ 
ObjectMeta: metav1.ObjectMeta{ Name: "snow-test", Namespace: "test-namespace", }, Spec: v1alpha1.ClusterSpec{ ClusterNetwork: v1alpha1.ClusterNetwork{ CNI: v1alpha1.Cilium, Pods: v1alpha1.Pods{ CidrBlocks: []string{ "10.1.0.0/16", }, }, Services: v1alpha1.Services{ CidrBlocks: []string{ "10.96.0.0/12", }, }, }, ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{ Count: 3, Endpoint: &v1alpha1.Endpoint{ Host: "1.2.3.4", }, MachineGroupRef: &v1alpha1.Ref{ Kind: "SnowMachineConfig", Name: "test-cp", }, }, KubernetesVersion: "1.21", WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{ { Name: "md-0", Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{ Kind: "SnowMachineConfig", Name: "test-wn", }, UpgradeRolloutStrategy: &v1alpha1.WorkerNodesUpgradeRolloutStrategy{ Type: "RollingUpdate", RollingUpdate: v1alpha1.WorkerNodesRollingUpdateParams{ MaxSurge: 1, MaxUnavailable: 0, }, }, }, }, DatacenterRef: v1alpha1.Ref{ Kind: "SnowDatacenterConfig", Name: "test", }, }, } } func givenClusterConfig() *v1alpha1.Cluster { return &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "snow-test", Namespace: "test-namespace", }, Spec: v1alpha1.ClusterSpec{ ClusterNetwork: v1alpha1.ClusterNetwork{ CNI: v1alpha1.Cilium, Pods: v1alpha1.Pods{ CidrBlocks: []string{ "10.1.0.0/16", }, }, Services: v1alpha1.Services{ CidrBlocks: []string{ "10.96.0.0/12", }, }, }, ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{ Count: 3, Endpoint: &v1alpha1.Endpoint{ Host: "1.2.3.4", }, MachineGroupRef: &v1alpha1.Ref{ Kind: "SnowMachineConfig", Name: "test-cp", }, }, KubernetesVersion: "1.21", WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{ { Name: "md-0", Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{ Kind: "SnowMachineConfig", Name: "test-wn", }, }, }, DatacenterRef: v1alpha1.Ref{ Kind: "SnowDatacenterConfig", Name: "test", }, }, } } func givenDatacenterConfig() *v1alpha1.SnowDatacenterConfig { return &v1alpha1.SnowDatacenterConfig{ TypeMeta: metav1.TypeMeta{ Kind: "SnowDatacenterConfig", APIVersion: "anywhere.eks.amazonaws.com/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "test-namespace", }, Spec: v1alpha1.SnowDatacenterConfigSpec{ IdentityRef: v1alpha1.Ref{ Kind: "Secret", Name: "test-snow-credentials", }, }, } } func givenMachineConfigs() map[string]*v1alpha1.SnowMachineConfig { return map[string]*v1alpha1.SnowMachineConfig{ "test-cp": { TypeMeta: metav1.TypeMeta{ Kind: "SnowMachineConfig", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-cp", Namespace: "test-namespace", }, Spec: v1alpha1.SnowMachineConfigSpec{ AMIID: "eks-d-v1-21-5-ubuntu-ami-02833ca9a8f29c2ea", InstanceType: "sbe-c.large", SshKeyName: "default", PhysicalNetworkConnector: "SFP_PLUS", Devices: []string{ "1.2.3.4", "1.2.3.5", }, OSFamily: v1alpha1.Ubuntu, Network: v1alpha1.SnowNetwork{ DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{ { Index: 1, DHCP: true, Primary: true, }, }, }, }, }, "test-wn": { TypeMeta: metav1.TypeMeta{ Kind: "SnowMachineConfig", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-wn", Namespace: "test-namespace", }, Spec: v1alpha1.SnowMachineConfigSpec{ AMIID: "eks-d-v1-21-5-ubuntu-ami-02833ca9a8f29c2ea", InstanceType: "sbe-c.xlarge", SshKeyName: "default", PhysicalNetworkConnector: "SFP_PLUS", Devices: []string{ "1.2.3.4", "1.2.3.5", }, OSFamily: v1alpha1.Ubuntu, Network: v1alpha1.SnowNetwork{ DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{ { Index: 1, DHCP: true, Primary: true, }, }, }, }, }, } } func givenIPPools() 
map[string]*v1alpha1.SnowIPPool { return map[string]*v1alpha1.SnowIPPool{ "ip-pool-1": { TypeMeta: metav1.TypeMeta{ Kind: snow.SnowIPPoolKind, }, ObjectMeta: metav1.ObjectMeta{ Name: "ip-pool-1", Namespace: "test-namespace", }, Spec: v1alpha1.SnowIPPoolSpec{ Pools: []v1alpha1.IPPool{ { IPStart: "start", IPEnd: "end", Gateway: "gateway", Subnet: "subnet", }, }, }, }, } } func givenProvider(t *testing.T) *snow.SnowProvider { return newProvider(context.Background(), t, nil, nil, nil, gomock.NewController(t)) } func givenEmptyClusterSpec() *cluster.Spec { return test.NewClusterSpec(func(s *cluster.Spec) { s.VersionsBundle.KubeVersion = "1.21" }) } func newProvider(ctx context.Context, t *testing.T, kubeUnAuthClient snow.KubeUnAuthClient, mockaws *mocks.MockAwsClient, mockimds *mocks.MockLocalIMDSClient, ctrl *gomock.Controller) *snow.SnowProvider { awsClients := snow.AwsClientMap{ "1.2.3.4": mockaws, "1.2.3.5": mockaws, } mockClientRegistry := mocks.NewMockClientRegistry(ctrl) mockClientRegistry.EXPECT().Get(ctx).Return(awsClients, nil).AnyTimes() validator := snow.NewValidator(mockClientRegistry, snow.WithIMDS(mockimds)) defaulters := snow.NewDefaulters(mockClientRegistry, nil) configManager := snow.NewConfigManager(defaulters, validator) return snow.NewProvider( kubeUnAuthClient, configManager, false, ) } func setupContext(t *testing.T) { t.Setenv(credsFileEnvVar, credsFilePath) t.Setenv(certsFileEnvVar, certsFilePath) } func TestName(t *testing.T) { tt := newSnowTest(t) tt.Expect(tt.provider.Name()).To(Equal(expectedSnowProviderName)) } func wantEksaCredentialsSecretWithEnvCreds() *v1.Secret { secret := wantEksaCredentialsSecret() secret.Data["credentials"] = []byte("WzEuMi4zLjRdCmF3c19hY2Nlc3Nfa2V5X2lkID0gQUJDREVGR0hJSktMTU5PUFFSMlQKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5ID0gQWZTRDdzWXovVEJadHprUmVCbDZQdXVJU3pKMld0TmtlZVB3K25OekoKcmVnaW9uID0gc25vdw==") secret.Data["ca-bundle"] = []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURYakNDQWthZ0F3SUJBZ0lJYjVtMFJsakpDTUV3RFFZSktvWklodmNOQVFFTkJRQXdPREUyTURRR0ExVUUKQXd3dFNrbEVMVEl3TmpnME16UXlNREF3TWkweE9USXRNVFk0TFRFdE1qTTFMVEl5TFRBeExUQTJMVEl5TFRBMApNQjRYRFRJeE1ERXhNVEl5TURjMU9Gb1hEVEkxTVRJeE5qSXlNRGMxT0Zvd09ERTJNRFFHQTFVRUF3d3RTa2xFCkxUSXdOamcwTXpReU1EQXdNaTB4T1RJdE1UWTRMVEV0TWpNMUxUSXlMVEF4TFRBMkxUSXlMVEEwTUlJQklqQU4KQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBbU9UUURCZkJ0UGNWREZnL2E1OWRrK3JZclBSVQpmNXpsN0pnRkFFdzFuODJTa2JObTRzcndsb2o4cEN1RDFuSkFsTiszTEtvaWJ5OWpVOFpxb1FLcXBwSmFLMVFLCmR2MjdKWU5sV29yRzlyNktyRmtpRVRuMmN4dUF3Y1JCdnE0VUY3NldkTnI3ekZqSTEwOGJ5UHA5UGQwbXhLaVEKNldWYXhjS1g5QUVjYXJCL0dmaWRITzk1QWF5NnRpQlUxU1F2QkpybzNMMS9VRnU1U1RTcFphaTl6eCtWa1dUSgpEMEpYaDdlTEY0eUwwTjFvVTBoWDJDR0R4RHo0VmxKbUJPdmJuUnV3c09ydVJNdFVGUlV5NTljUHpyLy80ZmpkCjRTN0FZYmVPVlB3RVA3cTE5Tlo2K1A3RTcxalRxMXJ6OFJoQW5XL0pjYlRLUzBLcWdCVVB6MFU0cVFJREFRQUIKbzJ3d2FqQU1CZ05WSFJNRUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRVGFaekwyZ29xcTcvTWJKRWZOUnV6YndpaAprVEE3QmdOVkhSRUVOREF5aGpCSlJEcEtTVVF0TWpBMk9EUXpOREl3TURBeUxURTVNaTB4TmpndE1TMHlNelV0Ck1qSXRNREV0TURZdE1qSXRNRFF3RFFZSktvWklodmNOQVFFTkJRQURnZ0VCQUV6ZWwrVXNwaFV4NDlFVkF5V0IKUHpTem9FN1g2MmZnL2I0Z1U3aWZGSHBXcFlwQVBzYmFwejkvVHl3YzRUR1JJdGZjdFhZWnNqY2hKS2l1dEdVMgp6WDRydDFOU0hreDcyaU1sM29iUTJqUW1URDhmOUx5Q3F5YStRTTRDQTc0a2s2djJuZzFFaXdNWXZRbFR2V1k0CkZFV3YyMXlOUnMyeWlSdUhXalJZSDRURjU0Y0NvRFFHcEZwc09GaTBMNFYveW8xWHVpbVNMeDJ2dktaMGxDTnQKS3hDMW9DZ0N4eE5rT2EvNmlMazZxVkFOb1g1S0lWc2F0YVZodkdLKzltd1duOCtkbk1GbmVNaVdkL2p2aStkaApleXdsZFZFTEJXUktFTERkQmM5WGI0aTVCRVRGNmRVbG12cFdncE9YWE8zdUpsSVJHWkNWRkxzZ1E1MTFvTXhNCnJFQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==") 
return secret } func supportedInstanceTypes() []aws.EC2InstanceType { return []aws.EC2InstanceType{ { Name: "sbe-c.large", DefaultVCPU: ptr.Int32(2), }, { Name: "sbe-c.xlarge", DefaultVCPU: ptr.Int32(4), }, } } func TestSetupAndValidateCreateClusterSuccess(t *testing.T) { tt := newSnowTest(t) setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(supportedInstanceTypes(), nil).Times(4) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("1.2.3.5", nil) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(Succeed()) } func TestSetupAndValidateCreateClusterIMDSNotInitialized(t *testing.T) { tt := newSnowTest(t) setupContext(t) tt.provider = newProvider(tt.ctx, t, tt.kubeUnAuthClient, tt.aws, nil, tt.ctrl) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(supportedInstanceTypes(), nil).Times(4) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(MatchError(ContainSubstring("imds client is not initialized"))) } func TestSetupAndValidateCreateClusterCPIPInvalid(t *testing.T) { tt := newSnowTest(t) setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(supportedInstanceTypes(), nil).Times(4) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("1.2.3.4", nil) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(MatchError(ContainSubstring("control plane host ip cannot be same as the admin instance ip"))) } func TestSetupAndValidateCreateClusterGetInstanceIPError(t *testing.T) { tt := newSnowTest(t) setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(supportedInstanceTypes(), nil).Times(4) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("", errors.New("fetch instance ip error")) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(MatchError(ContainSubstring("fetch instance ip error"))) } 
func TestSetupAndValidateCreateClusterGetEC2InstanceTypesError(t *testing.T) { tt := newSnowTest(t) setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(nil, errors.New("get instance types error")) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("1.2.3.5", nil) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(MatchError(ContainSubstring("fetching supported instance types for device [1.2.3.4]: get instance types error"))) } func TestSetupAndValidateCreateClusterUnsupportedInstanceTypeError(t *testing.T) { tt := newSnowTest(t) instanceTypes := []aws.EC2InstanceType{ { Name: "new-instance-type", }, } setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(instanceTypes, nil) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("1.2.3.5", nil) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(MatchError(ContainSubstring("not supported in device [1.2.3.4]"))) } func TestSetupAndValidateCreateClusterInstanceTypeVCPUError(t *testing.T) { tt := newSnowTest(t) instanceTypes := []aws.EC2InstanceType{ { Name: "sbe-c.large", DefaultVCPU: ptr.Int32(1), }, { Name: "sbe-c.xlarge", DefaultVCPU: ptr.Int32(1), }, } setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(instanceTypes, nil) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("1.2.3.5", nil) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(MatchError(ContainSubstring("has 1 vCPU. 
Please choose an instance type with at least 2 default vCPU"))) } func TestSetupAndValidateCreateClusterNoCredsEnv(t *testing.T) { tt := newSnowTest(t) setupContext(t) os.Unsetenv(credsFileEnvVar) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("'EKSA_AWS_CREDENTIALS_FILE' is not set or is empty"))) } func TestSetupAndValidateCreateClusterNoCertsEnv(t *testing.T) { tt := newSnowTest(t) setupContext(t) os.Unsetenv(certsFileEnvVar) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("'EKSA_AWS_CA_BUNDLES_FILE' is not set or is empty"))) } func TestSetupAndValidateCreateClusterCPUpgradeStrategy(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, givenClusterSpecWithCPUpgradeStrategy()) tt.Expect(err).To(MatchError(ContainSubstring("failed setup and validations: Upgrade rollout strategy customization is not supported for snow provider"))) } func TestSetupAndValidateCreateClusterMDUpgradeStrategy(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, givenClusterSpecWithMDUpgradeStrategy()) tt.Expect(err).To(MatchError(ContainSubstring("failed setup and validations: Upgrade rollout strategy customization is not supported for snow provider"))) } func TestSetupAndValidateUpgradeClusterSuccess(t *testing.T) { tt := newSnowTest(t) setupContext(t) tt.aws.EXPECT().EC2ImageExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2KeyNameExists(tt.ctx, gomock.Any()).Return(true, nil).Times(4) tt.aws.EXPECT().EC2InstanceTypes(tt.ctx).Return(supportedInstanceTypes(), nil).Times(4) tt.aws.EXPECT().IsSnowballDeviceUnlocked(tt.ctx).Return(true, nil).Times(4) tt.aws.EXPECT().SnowballDeviceSoftwareVersion(tt.ctx).Return("102", nil).Times(4) tt.imds.EXPECT().EC2InstanceIP(tt.ctx).Return("1.2.3.5", nil) err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, tt.cluster, tt.clusterSpec, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(Succeed()) } func TestSetupAndValidateUpgradeClusterNoCredsEnv(t *testing.T) { tt := newSnowTest(t) setupContext(t) os.Unsetenv(credsFileEnvVar) err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, tt.cluster, tt.clusterSpec, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("'EKSA_AWS_CREDENTIALS_FILE' is not set or is empty"))) } func TestSetupAndValidateUpgradeClusterNoCertsEnv(t *testing.T) { tt := newSnowTest(t) setupContext(t) os.Unsetenv(certsFileEnvVar) err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, tt.cluster, tt.clusterSpec, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("'EKSA_AWS_CA_BUNDLES_FILE' is not set or is empty"))) } func TestSetupAndValidateUpgradeClusterCPUpgradeStrategy(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, tt.cluster, givenClusterSpecWithCPUpgradeStrategy(), givenClusterSpecWithCPUpgradeStrategy()) tt.Expect(err).To(MatchError(ContainSubstring("failed setup and validations: Upgrade rollout strategy customization is not supported for snow provider"))) } func TestSetupAndValidateUpgradeClusterMDUpgradeStrategy(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, tt.cluster, givenClusterSpecWithMDUpgradeStrategy(), 
givenClusterSpecWithMDUpgradeStrategy()) tt.Expect(err).To(MatchError(ContainSubstring("failed setup and validations: Upgrade rollout strategy customization is not supported for snow provider"))) } func TestSetupAndValidateDeleteClusterSuccess(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.cluster, tt.clusterSpec) tt.Expect(tt.clusterSpec.SnowCredentialsSecret).To(Equal(wantEksaCredentialsSecretWithEnvCreds())) tt.Expect(err).To(Succeed()) } func TestSetupAndValidateDeleteClusterNoCredsEnv(t *testing.T) { tt := newSnowTest(t) setupContext(t) os.Unsetenv(credsFileEnvVar) err := tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.cluster, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("'EKSA_AWS_CREDENTIALS_FILE' is not set or is empty"))) } func TestSetupAndValidateDeleteClusterNoCertsEnv(t *testing.T) { tt := newSnowTest(t) setupContext(t) os.Unsetenv(certsFileEnvVar) err := tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.cluster, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("'EKSA_AWS_CA_BUNDLES_FILE' is not set or is empty"))) } func TestSetupAndValidateDeleteClusterCPUpgradeStrategy(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.cluster, givenClusterSpecWithCPUpgradeStrategy()) tt.Expect(err).To(MatchError(ContainSubstring("failed setup and validations: Upgrade rollout strategy customization is not supported for snow provider"))) } func TestSetupAndValidateDeleteClusterMDUpgradeStrategy(t *testing.T) { tt := newSnowTest(t) setupContext(t) err := tt.provider.SetupAndValidateDeleteCluster(tt.ctx, tt.cluster, givenClusterSpecWithMDUpgradeStrategy()) tt.Expect(err).To(MatchError(ContainSubstring("failed setup and validations: Upgrade rollout strategy customization is not supported for snow provider"))) } func TestGenerateCAPISpecForCreateUbuntu(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test", constants.EksaSystemNamespace, &controlplanev1.KubeadmControlPlane{}, ). Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test-md-0", constants.EksaSystemNamespace, &clusterv1.MachineDeployment{}, ). Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) cp, md, err := tt.provider.GenerateCAPISpecForCreate(tt.ctx, tt.cluster, tt.clusterSpec) tt.Expect(err).To(Succeed()) test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_cp_ubuntu.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md_ubuntu.yaml") } func TestGenerateCAPISpecForCreateBottlerocket(t *testing.T) { tt := newSnowTest(t) tt.clusterSpec.SnowMachineConfigs["test-cp"].Spec.OSFamily = v1alpha1.Bottlerocket tt.clusterSpec.SnowMachineConfigs["test-wn"].Spec.OSFamily = v1alpha1.Bottlerocket tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test", constants.EksaSystemNamespace, &controlplanev1.KubeadmControlPlane{}, ). Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test-md-0", constants.EksaSystemNamespace, &clusterv1.MachineDeployment{}, ). 
Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) cp, md, err := tt.provider.GenerateCAPISpecForCreate(tt.ctx, tt.cluster, tt.clusterSpec) tt.Expect(err).To(Succeed()) test.AssertContentToFile(t, string(cp), "testdata/expected_results_main_cp_bottlerocket.yaml") test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md_bottlerocket.yaml") } func TestGenerateCAPISpecForUpgrade(t *testing.T) { tt := newSnowTest(t) mt := wantSnowMachineTemplate() tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test", constants.EksaSystemNamespace, &controlplanev1.KubeadmControlPlane{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *controlplanev1.KubeadmControlPlane) error { obj.Spec.MachineTemplate.InfrastructureRef.Name = "snow-test-control-plane-1" return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test-control-plane-1", constants.EksaSystemNamespace, &snowv1.AWSSnowMachineTemplate{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error { wantSnowMachineTemplate().DeepCopyInto(obj) obj.SetName("snow-test-control-plane-1") obj.Spec.Template.Spec.InstanceType = "sbe-c.large" return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test-md-0", constants.EksaSystemNamespace, &clusterv1.MachineDeployment{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error { wantMachineDeployment().DeepCopyInto(obj) obj.Spec.Template.Spec.InfrastructureRef.Name = "snow-test-md-0-1" obj.Spec.Template.Spec.Bootstrap.ConfigRef.Name = "snow-test-md-0-1" return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test-md-0-1", constants.EksaSystemNamespace, &bootstrapv1.KubeadmConfigTemplate{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error { wantKubeadmConfigTemplate().DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "snow-test-md-0-1", constants.EksaSystemNamespace, &snowv1.AWSSnowMachineTemplate{}, ). 
DoAndReturn(func(_ context.Context, _, _ string, obj *snowv1.AWSSnowMachineTemplate) error { mt.DeepCopyInto(obj) obj.SetName("snow-test-md-0-1") return nil }) gotCp, gotMd, err := tt.provider.GenerateCAPISpecForUpgrade(tt.ctx, tt.cluster, nil, nil, tt.clusterSpec) tt.Expect(err).To(Succeed()) test.AssertContentToFile(t, string(gotCp), "testdata/expected_results_main_cp_ubuntu.yaml") test.AssertContentToFile(t, string(gotMd), "testdata/expected_results_main_md_ubuntu.yaml") } func TestVersion(t *testing.T) { snowVersion := "v1.0.2" provider := givenProvider(t) clusterSpec := givenEmptyClusterSpec() clusterSpec.VersionsBundle.Snow.Version = snowVersion g := NewWithT(t) result := provider.Version(clusterSpec) g.Expect(result).To(Equal(snowVersion)) } func TestGetInfrastructureBundle(t *testing.T) { tt := newSnowTest(t) want := &types.InfrastructureBundle{ FolderName: "infrastructure-snow/v1.0.2/", Manifests: []releasev1alpha1.Manifest{ tt.clusterSpec.VersionsBundle.Snow.Components, tt.clusterSpec.VersionsBundle.Snow.Metadata, }, } got := tt.provider.GetInfrastructureBundle(tt.clusterSpec) tt.Expect(got).To(Equal(want)) } func TestGetDatacenterConfig(t *testing.T) { tt := newSnowTest(t) tt.Expect(tt.provider.DatacenterConfig(tt.clusterSpec).Kind()).To(Equal("SnowDatacenterConfig")) } func TestDatacenterResourceType(t *testing.T) { g := NewWithT(t) provider := givenProvider(t) g.Expect(provider.DatacenterResourceType()).To(Equal("snowdatacenterconfigs.anywhere.eks.amazonaws.com")) } func TestMachineResourceType(t *testing.T) { g := NewWithT(t) provider := givenProvider(t) g.Expect(provider.MachineResourceType()).To(Equal("snowmachineconfigs.anywhere.eks.amazonaws.com")) } func TestMachineConfigs(t *testing.T) { tt := newSnowTest(t) want := tt.provider.MachineConfigs(tt.clusterSpec) tt.Expect(len(want)).To(Equal(2)) } func TestDeleteResources(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient( tt.clusterSpec.ManagementCluster.KubeconfigFile, ).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT().Delete( tt.ctx, tt.clusterSpec.SnowDatacenter, ).Return(nil) tt.kubeconfigClient.EXPECT().Delete( tt.ctx, tt.clusterSpec.SnowMachineConfigs["test-cp"], ).Return(nil) tt.kubeconfigClient.EXPECT().Delete( tt.ctx, tt.clusterSpec.SnowMachineConfigs["test-wn"], ).Return(nil) err := tt.provider.DeleteResources(tt.ctx, tt.clusterSpec) tt.Expect(err).To(Succeed()) } func TestDeleteResourcesWithEmptyNamespace(t *testing.T) { tt := newSnowTest(t) tt.clusterSpec.SnowDatacenter.Namespace = "" for _, m := range tt.clusterSpec.SnowMachineConfigs { m.Namespace = "" } tt.kubeUnAuthClient.EXPECT().KubeconfigClient( tt.clusterSpec.ManagementCluster.KubeconfigFile, ).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT().Delete( tt.ctx, tt.clusterSpec.SnowDatacenter, ).Return(nil) tt.kubeconfigClient.EXPECT().Delete( tt.ctx, tt.clusterSpec.SnowMachineConfigs["test-cp"], ).Return(nil) tt.kubeconfigClient.EXPECT().Delete( tt.ctx, tt.clusterSpec.SnowMachineConfigs["test-wn"], ).Return(nil) err := tt.provider.DeleteResources(tt.ctx, tt.clusterSpec) tt.Expect(err).To(Succeed()) } func TestDeleteResourcesWhenObjectsDoNotExist(t *testing.T) { tt := newSnowTest(t) client := test.NewFakeKubeClient(tt.clusterSpec.Cluster) tt.kubeUnAuthClient.EXPECT().KubeconfigClient( tt.clusterSpec.ManagementCluster.KubeconfigFile, ).Return(client) err := tt.provider.DeleteResources(tt.ctx, tt.clusterSpec) tt.Expect(err).To(Succeed()) } func TestDeleteResourcesErrorDeletingDatacenter(t *testing.T) { tt := 
newSnowTest(t) tt.clusterSpec.SnowMachineConfigs = nil client := test.NewFakeKubeClientAlwaysError() tt.kubeUnAuthClient.EXPECT().KubeconfigClient( tt.clusterSpec.ManagementCluster.KubeconfigFile, ).Return(client) err := tt.provider.DeleteResources(tt.ctx, tt.clusterSpec) tt.Expect(err).To(MatchError(ContainSubstring("deleting snow datacenter"))) } func TestUpgradeNeededFalse(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient).Times(2) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowDatacenterConfig) error { tt.clusterSpec.SnowDatacenter.DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test-cp", "test-namespace", &v1alpha1.SnowMachineConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowMachineConfig) error { tt.clusterSpec.SnowMachineConfig("test-cp").DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test-wn", "test-namespace", &v1alpha1.SnowMachineConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowMachineConfig) error { tt.clusterSpec.SnowMachineConfig("test-wn").DeepCopyInto(obj) return nil }) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(Succeed()) tt.Expect(got).To(Equal(false)) } func TestUpgradeNeededDatacenterChanged(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowDatacenterConfig) error { return nil }) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(Succeed()) tt.Expect(got).To(Equal(true)) } func TestUpgradeNeededDatacenterNil(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(Succeed()) tt.Expect(got).To(Equal(true)) } func TestUpgradeNeededDatacenterError(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). Return(errors.New("error get dc")) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(MatchError("error get dc")) tt.Expect(got).To(Equal(false)) } func TestUpgradeNeededMachineConfigNil(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient).Times(2) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowDatacenterConfig) error { tt.clusterSpec.SnowDatacenter.DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). 
Get( tt.ctx, gomock.Any(), "test-namespace", &v1alpha1.SnowMachineConfig{}, ). Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(Succeed()) tt.Expect(got).To(Equal(true)) } func TestUpgradeNeededMachineConfigError(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient).Times(2) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowDatacenterConfig) error { tt.clusterSpec.SnowDatacenter.DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, gomock.Any(), "test-namespace", &v1alpha1.SnowMachineConfig{}, ). Return(errors.New("error get mc")) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(MatchError("error get mc")) tt.Expect(got).To(Equal(false)) } func TestUpgradeNeededMachineConfigChanged(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient).Times(2) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowDatacenterConfig) error { tt.clusterSpec.SnowDatacenter.DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, gomock.Any(), "test-namespace", &v1alpha1.SnowMachineConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowMachineConfig) error { return nil }) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(Succeed()) tt.Expect(got).To(Equal(true)) } func TestUpgradeNeededMachineConfigRemoveDevice(t *testing.T) { tt := newSnowTest(t) delete(tt.clusterSpec.SnowMachineConfigs, "test-wn") tt.kubeUnAuthClient.EXPECT().KubeconfigClient(tt.cluster.KubeconfigFile).Return(tt.kubeconfigClient).Times(2) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test", "test-namespace", &v1alpha1.SnowDatacenterConfig{}, ). DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowDatacenterConfig) error { tt.clusterSpec.SnowDatacenter.DeepCopyInto(obj) return nil }) tt.kubeconfigClient.EXPECT(). Get( tt.ctx, "test-cp", "test-namespace", &v1alpha1.SnowMachineConfig{}, ). 
DoAndReturn(func(_ context.Context, _, _ string, obj *v1alpha1.SnowMachineConfig) error { tt.clusterSpec.SnowMachineConfig("test-cp").DeepCopyInto(obj) obj.Spec.Devices = []string{"1.2.3.4"} return nil }) got, err := tt.provider.UpgradeNeeded(tt.ctx, tt.clusterSpec, tt.clusterSpec, tt.cluster) tt.Expect(err).To(Succeed()) tt.Expect(got).To(Equal(true)) } func TestUpgradeNeededBundle(t *testing.T) { tests := []struct { name string bundle releasev1alpha1.SnowBundle want bool }{ { name: "kube-vip image digest diff", bundle: releasev1alpha1.SnowBundle{ Version: "v1.0.2", KubeVip: releasev1alpha1.Image{ Name: "kube-vip", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.1433", ImageDigest: "sha256:diff", Description: "Container image for kube-vip image", Arch: []string{"amd64"}, }, Manager: releasev1alpha1.Image{ Name: "cluster-api-snow-controller", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/aws/cluster-api-provider-aws-snow/manager:v0.1.4-eks-a-v0.0.0-dev-build.2216", ImageDigest: "sha256:59da9c726c4816c29d119e77956c6391e2dff451daf36aeb60e5d6425eb88018", Description: "Container image for cluster-api-snow-controller image", Arch: []string{"amd64"}, }, }, want: true, }, { name: "manager image digest diff", bundle: releasev1alpha1.SnowBundle{ Version: "v1.0.2", KubeVip: releasev1alpha1.Image{ Name: "kube-vip", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.1433", ImageDigest: "sha256:cf324971db7696810effd5c6c95e34b2c115893e1fbcaeb8877355dc74768ef1", Description: "Container image for kube-vip image", Arch: []string{"amd64"}, }, Manager: releasev1alpha1.Image{ Name: "cluster-api-snow-controller", OS: "linux", URI: "public.ecr.aws/l0g8r8j6/aws/cluster-api-provider-aws-snow/manager:v0.1.4-eks-a-v0.0.0-dev-build.2216", ImageDigest: "sha256:diff", Description: "Container image for cluster-api-snow-controller image", Arch: []string{"amd64"}, }, }, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := newSnowTest(t) new := g.clusterSpec.DeepCopy() new.VersionsBundle.Snow = tt.bundle new.SnowMachineConfigs = givenMachineConfigs() got, err := g.provider.UpgradeNeeded(g.ctx, new, g.clusterSpec, g.cluster) g.Expect(err).To(Succeed()) g.Expect(got).To(Equal(tt.want)) }) } } func TestChangeDiffNoChange(t *testing.T) { g := NewWithT(t) provider := givenProvider(t) clusterSpec := givenEmptyClusterSpec() g.Expect(provider.ChangeDiff(clusterSpec, clusterSpec)).To(BeNil()) } func TestChangeDiffWithChange(t *testing.T) { g := NewWithT(t) provider := givenProvider(t) clusterSpec := givenEmptyClusterSpec() newClusterSpec := clusterSpec.DeepCopy() clusterSpec.VersionsBundle.Snow.Version = "v1.0.2" newClusterSpec.VersionsBundle.Snow.Version = "v1.0.3" want := &types.ComponentChangeDiff{ ComponentName: "snow", NewVersion: "v1.0.3", OldVersion: "v1.0.2", } g.Expect(provider.ChangeDiff(clusterSpec, newClusterSpec)).To(Equal(want)) } func TestUpdateSecrets(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().Apply( tt.ctx, tt.cluster.KubeconfigFile, tt.clusterSpec.SnowCredentialsSecret, ).Return(nil) tt.Expect(tt.provider.UpdateSecrets(tt.ctx, tt.cluster, tt.clusterSpec)).To(Succeed()) } func TestUpdateSecretsApplyError(t *testing.T) { tt := newSnowTest(t) tt.kubeUnAuthClient.EXPECT().Apply( tt.ctx, tt.cluster.KubeconfigFile, tt.clusterSpec.SnowCredentialsSecret, ).Return(errors.New("error")) tt.Expect(tt.provider.UpdateSecrets(tt.ctx, tt.cluster, tt.clusterSpec)).NotTo(Succeed()) }
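// Editorial note: every test above uses the same gomock wiring. A minimal
// sketch of that pattern, with hypothetical variable names:
//
//	ctrl := gomock.NewController(t)
//	awsClient := mocks.NewMockAwsClient(ctrl)
//	awsClient.EXPECT().EC2ImageExists(ctx, gomock.Any()).Return(true, nil)
//	// ...exercise the code under test; the controller verifies that all
//	// declared expectations were met when the test ends.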
1301
eks-anywhere
aws
Go
package snow

import (
	"context"
	"fmt"
	"reflect"
	"strconv"
	"strings"

	"github.com/pkg/errors"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

const (
	defaultAwsSshKeyName       = "eksa-default"
	snowballMinSoftwareVersion = 102
	minimumVCPU                = 2
)

// Validator includes a client registry that maintains a snow device aws client map,
// and a local imds service that is used to fetch metadata of the host instance.
type Validator struct {
	// clientRegistry maintains a device aws client mapping.
	clientRegistry ClientRegistry

	// imds is a local imds client built with the default aws config. This imds client can only
	// interact with the local instance metadata service.
	imds LocalIMDSClient
}

// ValidatorOpt updates a Validator.
type ValidatorOpt func(*Validator)

// WithIMDS returns a ValidatorOpt that sets the imds client.
func WithIMDS(imds LocalIMDSClient) ValidatorOpt {
	return func(c *Validator) {
		c.imds = imds
	}
}

// NewValidator creates a snow validator.
func NewValidator(clientRegistry ClientRegistry, opts ...ValidatorOpt) *Validator {
	v := &Validator{
		clientRegistry: clientRegistry,
	}

	for _, o := range opts {
		o(v)
	}

	return v
}

// ValidateEC2SshKeyNameExists validates the ssh key existence in each device in the device list.
func (v *Validator) ValidateEC2SshKeyNameExists(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
	if m.Spec.SshKeyName == "" {
		return nil
	}

	clientMap, err := v.clientRegistry.Get(ctx)
	if err != nil {
		return err
	}

	for _, ip := range m.Spec.Devices {
		client, ok := clientMap[ip]
		if !ok {
			return fmt.Errorf("credentials not found for device [%s]", ip)
		}

		keyExists, err := client.EC2KeyNameExists(ctx, m.Spec.SshKeyName)
		if err != nil {
			return fmt.Errorf("describing key pair on snow device [%s]: %v", ip, err)
		}
		if !keyExists {
			return fmt.Errorf("aws key pair [%s] does not exist on snow device [deviceIP=%s]", m.Spec.SshKeyName, ip)
		}
	}
	return nil
}

// ValidateEC2ImageExistsOnDevice validates the ami id (if specified) existence in each device in the device list.
func (v *Validator) ValidateEC2ImageExistsOnDevice(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
	if m.Spec.AMIID == "" {
		return nil
	}

	clientMap, err := v.clientRegistry.Get(ctx)
	if err != nil {
		return err
	}

	for _, ip := range m.Spec.Devices {
		client, ok := clientMap[ip]
		if !ok {
			return fmt.Errorf("credentials not found for device [%s]", ip)
		}

		imageExists, err := client.EC2ImageExists(ctx, m.Spec.AMIID)
		if err != nil {
			return fmt.Errorf("describing image on snow device [%s]: %v", ip, err)
		}
		if !imageExists {
			return fmt.Errorf("aws image [%s] does not exist", m.Spec.AMIID)
		}
	}
	return nil
}

// ValidateDeviceIsUnlocked verifies if all snow devices in the device list are unlocked.
func (v *Validator) ValidateDeviceIsUnlocked(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
	clientMap, err := v.clientRegistry.Get(ctx)
	if err != nil {
		return err
	}

	for _, ip := range m.Spec.Devices {
		client, ok := clientMap[ip]
		if !ok {
			return fmt.Errorf("credentials not found for device [%s]", ip)
		}

		deviceUnlocked, err := client.IsSnowballDeviceUnlocked(ctx)
		if err != nil {
			return fmt.Errorf("checking unlock status for device [%s]: %v", ip, err)
		}
		if !deviceUnlocked {
			return fmt.Errorf("device [%s] is not unlocked. Please unlock the device before you proceed", ip)
		}
	}
	return nil
}

func validateInstanceTypeInDevice(ctx context.Context, client AwsClient, instanceType, deviceIP string) error {
	instanceTypes, err := client.EC2InstanceTypes(ctx)
	if err != nil {
		return fmt.Errorf("fetching supported instance types for device [%s]: %v", deviceIP, err)
	}

	for _, it := range instanceTypes {
		if instanceType != it.Name {
			continue
		}
		if it.DefaultVCPU != nil && *it.DefaultVCPU < minimumVCPU {
			return fmt.Errorf("the instance type [%s] has %d vCPU. Please choose an instance type with at least %d default vCPU", instanceType, *it.DefaultVCPU, minimumVCPU)
		}
		return nil
	}

	return fmt.Errorf("the instance type [%s] is not supported in device [%s]", instanceType, deviceIP)
}

// ValidateInstanceType validates whether the instance type is compatible to run in each device.
func (v *Validator) ValidateInstanceType(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
	clientMap, err := v.clientRegistry.Get(ctx)
	if err != nil {
		return err
	}

	for _, ip := range m.Spec.Devices {
		client, ok := clientMap[ip]
		if !ok {
			return fmt.Errorf("credentials not found for device [%s]", ip)
		}
		if err := validateInstanceTypeInDevice(ctx, client, m.Spec.InstanceType, ip); err != nil {
			return err
		}
	}
	return nil
}

// ValidateDeviceSoftware validates whether the snow software is compatible to run eks-a in each device.
func (v *Validator) ValidateDeviceSoftware(ctx context.Context, m *v1alpha1.SnowMachineConfig) error {
	clientMap, err := v.clientRegistry.Get(ctx)
	if err != nil {
		return err
	}

	for _, ip := range m.Spec.Devices {
		client, ok := clientMap[ip]
		if !ok {
			return fmt.Errorf("credentials not found for device [%s]", ip)
		}

		version, err := client.SnowballDeviceSoftwareVersion(ctx)
		if err != nil {
			return fmt.Errorf("checking software version for device [%s]: %v", ip, err)
		}

		versionInt, err := strconv.Atoi(version)
		if err != nil {
			return fmt.Errorf("checking software version for device [%s]: %v", ip, err)
		}

		if versionInt < snowballMinSoftwareVersion {
			return fmt.Errorf("the software version installed [%s] on device [%s] is below the minimum supported version [%d]", version, ip, snowballMinSoftwareVersion)
		}
	}
	return nil
}

// ValidateControlPlaneIP checks whether the control plane ip is valid for creating a snow cluster.
func (v *Validator) ValidateControlPlaneIP(ctx context.Context, controlPlaneIP string) error {
	if v.imds == nil || reflect.ValueOf(v.imds).IsNil() {
		return errors.New("imds client is not initialized")
	}

	instanceIP, err := v.imds.EC2InstanceIP(ctx)
	if err != nil {
		if strings.Contains(err.Error(), "404") {
			// the admin instance is not running inside snow devices or doesn't have a public IP
			return nil
		}
		return fmt.Errorf("fetching host instance ip: %v", err)
	}

	if controlPlaneIP == instanceIP {
		return fmt.Errorf("control plane host ip cannot be same as the admin instance ip")
	}
	return nil
}
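// Editorial sketch of the software-version gate implemented by
// ValidateDeviceSoftware above, assuming a device that reports a plain
// integer version string (the values here are illustrative):
//
//	versionInt, err := strconv.Atoi("101")
//	if err == nil && versionInt < snowballMinSoftwareVersion { // 101 < 102
//	    // rejected: below the minimum supported version
//	}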
232
eks-anywhere
aws
Go
package snow_test import ( "context" "errors" "fmt" "testing" "github.com/golang/mock/gomock" "github.com/google/uuid" . "github.com/onsi/gomega" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/filewriter" "github.com/aws/eks-anywhere/pkg/providers/snow" "github.com/aws/eks-anywhere/pkg/providers/snow/mocks" ) type configManagerTest struct { *WithT ctx context.Context aws *mocks.MockAwsClient keyGenerator *mocks.MockSshKeyGenerator writer filewriter.FileWriter validator *snow.Validator defaulters *snow.Defaulters machineConfigDefaulters *snow.MachineConfigDefaulters machineConfig *v1alpha1.SnowMachineConfig uuid uuid.UUID clusterName string defaultKeyName string } func newConfigManagerTest(t *testing.T) *configManagerTest { ctx := context.Background() ctrl := gomock.NewController(t) mockaws := mocks.NewMockAwsClient(ctrl) mockKeyGenerator := mocks.NewMockSshKeyGenerator(ctrl) awsClients := snow.AwsClientMap{ "device-1": mockaws, "device-2": mockaws, "device-3": mockaws, } mockClientRegistry := mocks.NewMockClientRegistry(ctrl) mockClientRegistry.EXPECT().Get(ctx).Return(awsClients, nil).AnyTimes() m := &v1alpha1.SnowMachineConfig{ ObjectMeta: v1.ObjectMeta{ Name: "cp-machine", }, Spec: v1alpha1.SnowMachineConfigSpec{ AMIID: "ami-1", SshKeyName: "default", Devices: []string{ "device-1", "device-2", }, }, } _, writer := test.NewWriter(t) validators := snow.NewValidator(mockClientRegistry) uuid := uuid.New() defaulters := snow.NewDefaulters(mockClientRegistry, writer, snow.WithKeyGenerator(mockKeyGenerator), snow.WithUUID(uuid)) return &configManagerTest{ WithT: NewWithT(t), ctx: ctx, aws: mockaws, keyGenerator: mockKeyGenerator, writer: writer, validator: validators, defaulters: defaulters, machineConfigDefaulters: snow.NewMachineConfigDefaulters(defaulters), machineConfig: m, uuid: uuid, clusterName: "test-snow", defaultKeyName: fmt.Sprintf("eksa-default-test-snow-%s", uuid), } } func newConfigManagerTestClientMapError(t *testing.T) *configManagerTest { ctx := context.Background() ctrl := gomock.NewController(t) mockKeyGenerator := mocks.NewMockSshKeyGenerator(ctrl) mockClientRegistry := mocks.NewMockClientRegistry(ctrl) mockClientRegistry.EXPECT().Get(ctx).Return(nil, errors.New("test error")) m := &v1alpha1.SnowMachineConfig{ ObjectMeta: v1.ObjectMeta{ Name: "cp-machine", }, Spec: v1alpha1.SnowMachineConfigSpec{ AMIID: "ami-1", SshKeyName: "default", Devices: []string{ "device-1", "device-2", }, }, } _, writer := test.NewWriter(t) validators := snow.NewValidator(mockClientRegistry) defaulters := snow.NewDefaulters(mockClientRegistry, writer, snow.WithKeyGenerator(mockKeyGenerator)) return &configManagerTest{ WithT: NewWithT(t), ctx: ctx, keyGenerator: mockKeyGenerator, writer: writer, validator: validators, defaulters: defaulters, machineConfigDefaulters: snow.NewMachineConfigDefaulters(defaulters), machineConfig: m, } } func TestValidateEC2SshKeyNameExists(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.machineConfig.Spec.SshKeyName).Return(true, nil).Times(2) err := g.validator.ValidateEC2SshKeyNameExists(g.ctx, g.machineConfig) g.Expect(err).To(Succeed()) } func TestValidateEC2SshKeyNameExistsNotExists(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.machineConfig.Spec.SshKeyName).Return(false, nil) err := g.validator.ValidateEC2SshKeyNameExists(g.ctx, g.machineConfig) 
g.Expect(err).To(MatchError(ContainSubstring("does not exist"))) } func TestValidateEC2SshKeyNameExistsError(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().EC2KeyNameExists(g.ctx, g.machineConfig.Spec.SshKeyName).Return(false, errors.New("error")) err := g.validator.ValidateEC2SshKeyNameExists(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateEC2SshKeyNameEmpty(t *testing.T) { g := newConfigManagerTest(t) g.machineConfig.Spec.SshKeyName = "" err := g.validator.ValidateEC2SshKeyNameExists(g.ctx, g.machineConfig) g.Expect(err).To(Succeed()) } func TestValidateEC2SshKeyNameClientMapError(t *testing.T) { g := newConfigManagerTestClientMapError(t) err := g.validator.ValidateEC2SshKeyNameExists(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateEC2SshKeyNameDeviceNotFoundInClientMapError(t *testing.T) { g := newConfigManagerTest(t) g.machineConfig.Spec.Devices = []string{"device-not-exist"} err := g.validator.ValidateEC2SshKeyNameExists(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("credentials not found for device"))) } func TestValidateEC2ImageExistsOnDevice(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().EC2ImageExists(g.ctx, g.machineConfig.Spec.AMIID).Return(true, nil).Times(2) err := g.validator.ValidateEC2ImageExistsOnDevice(g.ctx, g.machineConfig) g.Expect(err).To(Succeed()) } func TestValidateEC2ImageExistsOnDeviceAmiIDEmpty(t *testing.T) { g := newConfigManagerTest(t) g.machineConfig.Spec.AMIID = "" err := g.validator.ValidateEC2ImageExistsOnDevice(g.ctx, g.machineConfig) g.Expect(err).To(Succeed()) } func TestValidateEC2ImageExistsOnDeviceNotExists(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().EC2ImageExists(g.ctx, g.machineConfig.Spec.AMIID).Return(false, nil) err := g.validator.ValidateEC2ImageExistsOnDevice(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("does not exist"))) } func TestValidateEC2ImageExistsOnDeviceError(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().EC2ImageExists(g.ctx, g.machineConfig.Spec.AMIID).Return(false, errors.New("error")) err := g.validator.ValidateEC2ImageExistsOnDevice(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateEC2ImageExistsOnDeviceClientMapError(t *testing.T) { g := newConfigManagerTestClientMapError(t) err := g.validator.ValidateEC2ImageExistsOnDevice(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateEC2ImageExistsOnDeviceNotFoundInClientMapError(t *testing.T) { g := newConfigManagerTest(t) g.machineConfig.Spec.Devices = []string{"device-not-exist"} err := g.validator.ValidateEC2ImageExistsOnDevice(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("credentials not found for device"))) } func TestValidateDeviceIsUnlocked(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().IsSnowballDeviceUnlocked(g.ctx).Return(true, nil).Times(2) err := g.validator.ValidateDeviceIsUnlocked(g.ctx, g.machineConfig) g.Expect(err).To(Succeed()) } func TestValidateDeviceIsUnlockedLocked(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().IsSnowballDeviceUnlocked(g.ctx).Return(false, nil) err := g.validator.ValidateDeviceIsUnlocked(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("Please unlock the device before you proceed"))) } func TestValidateDeviceIsUnlockedError(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().IsSnowballDeviceUnlocked(g.ctx).Return(false, errors.New("error")) err := 
g.validator.ValidateDeviceIsUnlocked(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateDeviceIsUnlockedClientMapError(t *testing.T) { g := newConfigManagerTestClientMapError(t) err := g.validator.ValidateDeviceIsUnlocked(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateDeviceIsUnlockedNotFoundInClientMapError(t *testing.T) { g := newConfigManagerTest(t) g.machineConfig.Spec.Devices = []string{"device-not-exist"} err := g.validator.ValidateDeviceIsUnlocked(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("credentials not found for device"))) } func TestValidateDeviceSoftware(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().SnowballDeviceSoftwareVersion(g.ctx).Return("1012", nil).Times(2) err := g.validator.ValidateDeviceSoftware(g.ctx, g.machineConfig) g.Expect(err).To(Succeed()) } func TestValidateDeviceSoftwareVersionTooLow(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().SnowballDeviceSoftwareVersion(g.ctx).Return("101", nil) err := g.validator.ValidateDeviceSoftware(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("below the minimum supported version"))) } func TestValidateDeviceSoftwareVersionError(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().SnowballDeviceSoftwareVersion(g.ctx).Return("", errors.New("error")) err := g.validator.ValidateDeviceSoftware(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateDeviceSoftwareClientMapError(t *testing.T) { g := newConfigManagerTestClientMapError(t) err := g.validator.ValidateDeviceSoftware(g.ctx, g.machineConfig) g.Expect(err).NotTo(Succeed()) } func TestValidateDeviceSoftwareNotFoundInClientMapError(t *testing.T) { g := newConfigManagerTest(t) g.machineConfig.Spec.Devices = []string{"device-not-exist"} err := g.validator.ValidateDeviceSoftware(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("credentials not found for device"))) } func TestValidateDeviceSoftwareConvertToIntegerError(t *testing.T) { g := newConfigManagerTest(t) g.aws.EXPECT().SnowballDeviceSoftwareVersion(g.ctx).Return("version", nil) err := g.validator.ValidateDeviceSoftware(g.ctx, g.machineConfig) g.Expect(err).To(MatchError(ContainSubstring("invalid syntax"))) }
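// Editorial usage sketch (variable names are hypothetical): the production
// wiring mirrors these tests, building a Validator over a client registry
// that maps device IPs to AWS clients and then running individual validations.
//
//	validator := snow.NewValidator(clientRegistry)
//	if err := validator.ValidateDeviceIsUnlocked(ctx, machineConfig); err != nil {
//	    // surface the validation failure to the caller
//	}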
271
eks-anywhere
aws
Go
package snow

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"

	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/clusterapi"
	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

type (
	// BaseWorkers represents the Snow specific CAPI spec for worker nodes.
	BaseWorkers     = clusterapi.Workers[*snowv1.AWSSnowMachineTemplate]
	baseWorkerGroup = clusterapi.WorkerGroup[*snowv1.AWSSnowMachineTemplate]
)

// Workers holds the Snow specific objects for CAPI snow worker groups.
type Workers struct {
	BaseWorkers
	CAPASIPPools CAPASIPPools
}

// Objects returns the worker node objects associated with the snow cluster.
func (w Workers) Objects() []kubernetes.Object {
	o := w.BaseWorkers.WorkerObjects()
	for _, p := range w.CAPASIPPools {
		o = append(o, p)
	}

	return o
}

// WorkersSpec generates a Snow specific CAPI spec for an eks-a cluster's worker nodes.
// It talks to the cluster with a client to detect changes in immutable objects and generates new
// names for them.
func WorkersSpec(ctx context.Context, log logr.Logger, spec *cluster.Spec, client kubernetes.Client) (*Workers, error) {
	w := &Workers{
		BaseWorkers: BaseWorkers{
			Groups: make([]baseWorkerGroup, 0, len(spec.Cluster.Spec.WorkerNodeGroupConfigurations)),
		},
	}

	capasPools := CAPASIPPools{}
	for _, wc := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
		machineConfig := spec.SnowMachineConfig(wc.MachineGroupRef.Name)
		capasPools.addPools(machineConfig.Spec.Network.DirectNetworkInterfaces, spec.SnowIPPools)
		machineTemplate := MachineTemplate(clusterapi.WorkerMachineTemplateName(spec, wc), spec.SnowMachineConfigs[wc.MachineGroupRef.Name], capasPools)

		kubeadmConfigTemplate, err := KubeadmConfigTemplate(log, spec, wc)
		if err != nil {
			return nil, err
		}

		machineDeployment := machineDeployment(spec, wc, kubeadmConfigTemplate, machineTemplate)

		w.Groups = append(w.Groups, baseWorkerGroup{
			MachineDeployment:       machineDeployment,
			KubeadmConfigTemplate:   kubeadmConfigTemplate,
			ProviderMachineTemplate: machineTemplate,
		})
	}

	if err := w.UpdateImmutableObjectNames(ctx, client, getMachineTemplate, MachineTemplateDeepDerivative); err != nil {
		return nil, errors.Wrap(err, "updating snow worker immutable object names")
	}

	w.CAPASIPPools = capasPools

	return w, nil
}
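// Example (illustrative sketch, not part of the upstream file): how a caller
// might drive WorkersSpec and collect the generated CAPI objects. Only
// WorkersSpec and Workers.Objects above are real; the helper name and the
// assumption that the caller applies the returned objects are hypothetical.
func buildSnowWorkerObjects(ctx context.Context, log logr.Logger, spec *cluster.Spec, client kubernetes.Client) ([]kubernetes.Object, error) {
	workers, err := WorkersSpec(ctx, log, spec, client)
	if err != nil {
		return nil, err
	}

	// Objects flattens machine deployments, kubeadm config templates,
	// provider machine templates, and any referenced CAPAS IP pools.
	return workers.Objects(), nil
}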
77
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

const (
	// AWSSnowClusterFinalizer allows ReconcileAWSSnowCluster to clean up AWS Snow resources associated with AWSSnowCluster before
	// removing it from the apiserver.
	AWSSnowClusterFinalizer = "awssnowcluster.infrastructure.cluster.x-k8s.io"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// AWSSnowClusterSpec defines the desired state of AWSSnowCluster.
type AWSSnowClusterSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Foo is an example field of AWSSnowCluster. Edit awssnowcluster_types.go to remove/update
	// Foo string `json:"foo,omitempty"`
	// TODO: More to add.

	// Region is the AWS Region the cluster lives in.
	Region string `json:"region,omitempty"`

	// SSHKeyName is the name of the ssh key to attach to the bastion host. Valid values are empty string
	// (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name).
	// +optional
	SSHKeyName *string `json:"sshKeyName,omitempty"`

	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
	// +optional
	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`

	// AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the
	// ones added by default.
	// +optional
	// AdditionalTags Tags `json:"additionalTags,omitempty"`

	// ImageLookupFormat is the AMI naming format to look up machine images when
	// a machine does not specify an AMI. When set, this will be used for all
	// cluster machines unless a machine specifies a different ImageLookupOrg.
	// Supports substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base
	// OS and kubernetes version, respectively. The BaseOS will be the value in
	// ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
	// defined by the packages produced by kubernetes/release without v as a
	// prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
	// image format of capas-ami-{{.BaseOS}}-.?{{.K8sVersion}}-* will end up
	// searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
	// Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
	// also: https://golang.org/pkg/text/template/
	// +optional
	ImageLookupFormat string `json:"imageLookupFormat,omitempty"`

	// ImageLookupOrg is the AWS Organization ID to look up machine images when a
	// machine does not specify an AMI. When set, this will be used for all
	// cluster machines unless a machine specifies a different ImageLookupOrg.
	// +optional
	ImageLookupOrg string `json:"imageLookupOrg,omitempty"`

	// ImageLookupBaseOS is the name of the base operating system used to look
	// up machine images when a machine does not specify an AMI. When set, this
	// will be used for all cluster machines unless a machine specifies a
	// different ImageLookupBaseOS.
	ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`

	// PhysicalNetworkConnectorType is the physical network connector type to use for creating direct network
	// interfaces. Valid values are a physical network connector type (SFP_PLUS or QSFP), or omitted
	// (cluster-api selects a valid physical network interface, default is SFP_PLUS).
	// +kubebuilder:validation:Enum:=SFP_PLUS;QSFP;RJ45
	// +optional
	PhysicalNetworkConnectorType *string `json:"physicalNetworkConnectorType,omitempty"`

	// IdentityRef is a reference to an identity to be used when reconciling this cluster.
	// +optional
	IdentityRef *AWSSnowIdentityReference `json:"identityRef,omitempty"`
}

// AWSSnowIdentityKind defines allowed AWSSnow identity types.
type AWSSnowIdentityKind string

// SecretKind is the AWSSnow identity kind backed by a Kubernetes Secret.
var SecretKind = AWSSnowIdentityKind("Secret")

// AWSSnowIdentityReference specifies an identity.
type AWSSnowIdentityReference struct {
	// Name of the identity.
	// +kubebuilder:validation:MinLength=1
	Name string `json:"name"`

	// Kind of the identity.
	// +kubebuilder:validation:Enum=Secret
	Kind AWSSnowIdentityKind `json:"kind"`
}

// AWSSnowClusterStatus defines the observed state of AWSSnowCluster.
type AWSSnowClusterStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// +kubebuilder:default=false
	Ready bool `json:"ready"`
	// Network        Network                  `json:"network,omitempty"`
	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
	// Bastion        *Instance                `json:"bastion,omitempty"`
	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// AWSSnowCluster is the Schema for the awssnowclusters API.
type AWSSnowCluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AWSSnowClusterSpec   `json:"spec,omitempty"`
	Status AWSSnowClusterStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// AWSSnowClusterList contains a list of AWSSnowCluster.
type AWSSnowClusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AWSSnowCluster `json:"items"`
}

// GetConditions returns the conditions from the AWSSnowCluster status.
func (r *AWSSnowCluster) GetConditions() clusterv1.Conditions {
	return r.Status.Conditions
}

// SetConditions sets the conditions on the AWSSnowCluster status.
func (r *AWSSnowCluster) SetConditions(conditions clusterv1.Conditions) {
	r.Status.Conditions = conditions
}

func init() {
	SchemeBuilder.Register(&AWSSnowCluster{}, &AWSSnowClusterList{})
}
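// Example (illustrative sketch, not part of the upstream file): constructing a
// minimal AWSSnowCluster with the fields defined above. All concrete values
// (names, namespace, host, port) are illustrative only.
func exampleAWSSnowCluster() *AWSSnowCluster {
	return &AWSSnowCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "snow-test", Namespace: "eksa-system"},
		Spec: AWSSnowClusterSpec{
			Region:               "snow",
			ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "10.1.10.20", Port: 6443},
			// IdentityRef points at a Secret holding device credentials.
			IdentityRef: &AWSSnowIdentityReference{Name: "snow-credentials", Kind: SecretKind},
		},
	}
}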
157
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// AWSSnowIPPoolSpec defines the desired state of AWSSnowIPPool.
type AWSSnowIPPoolSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make generate" to regenerate code after modifying this file

	// IPPools defines a range of ip addresses for static IP configurations.
	IPPools []IPPool `json:"pools,omitempty"`
}

// IPPool is the configuration of a static ip range.
type IPPool struct {
	// IPStart is the start ip address of an ip range.
	IPStart *string `json:"ipStart,omitempty"`
	// IPEnd is the end ip address of an ip range.
	IPEnd *string `json:"ipEnd,omitempty"`
	// Subnet is the customer's network subnet; it can be used to determine whether an ip is in this subnet.
	Subnet *string `json:"subnet,omitempty"`
	// Gateway is the gateway of this subnet, used for routing purposes.
	Gateway *string `json:"gateway,omitempty"`
}

// AWSSnowIPPoolStatus defines the observed state of AWSSnowIPPool.
type AWSSnowIPPoolStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make generate" to regenerate code after modifying this file
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// AWSSnowIPPool is the Schema for the awssnowippools API.
type AWSSnowIPPool struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AWSSnowIPPoolSpec   `json:"spec,omitempty"`
	Status AWSSnowIPPoolStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// AWSSnowIPPoolList contains a list of AWSSnowIPPool.
type AWSSnowIPPoolList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AWSSnowIPPool `json:"items"`
}

func init() {
	SchemeBuilder.Register(&AWSSnowIPPool{}, &AWSSnowIPPoolList{})
}
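// Example (illustrative sketch, not part of the upstream file): declaring a
// static ip pool with the types above. Addresses, subnet, and gateway values
// are illustrative only.
func exampleAWSSnowIPPool() *AWSSnowIPPool {
	start, end := "10.1.0.10", "10.1.0.20"
	subnet, gateway := "10.1.0.0/24", "10.1.0.1"
	return &AWSSnowIPPool{
		ObjectMeta: metav1.ObjectMeta{Name: "static-pool"},
		Spec: AWSSnowIPPoolSpec{
			IPPools: []IPPool{{
				IPStart: &start,
				IPEnd:   &end,
				Subnet:  &subnet,
				Gateway: &gateway,
			}},
		},
	}
}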
76
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// AWSSnowMachineTemplateSpec defines the desired state of AWSSnowMachineTemplate.
type AWSSnowMachineTemplateSpec struct {
	Template AWSSnowMachineTemplateResource `json:"template"`
}

// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awssnowmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awssmt
// +kubebuilder:storageversion

// AWSSnowMachineTemplate is the Schema for the awssnowmachinetemplates API.
type AWSSnowMachineTemplate struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec AWSSnowMachineTemplateSpec `json:"spec,omitempty"`
}

// +kubebuilder:object:root=true

// AWSSnowMachineTemplateList contains a list of AWSSnowMachineTemplate.
type AWSSnowMachineTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AWSSnowMachineTemplate `json:"items"`
}

func init() {
	SchemeBuilder.Register(&AWSSnowMachineTemplate{}, &AWSSnowMachineTemplateList{})
}
52
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/errors"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

const (
	// AWSSnowMachineFinalizer allows ReconcileAWSSnowMachine to clean up AWS Snow resources associated with AWSSnowMachine before
	// removing it from the apiserver.
	AWSSnowMachineFinalizer = "awssnowmachine.infrastructure.cluster.x-k8s.io"

	// MachineEtcdLabelName is the label set on machines or related objects that are part of an etcd node.
	MachineEtcdLabelName = "cluster.x-k8s.io/etcd-cluster"
)

// AWSSnowMachineSpec defines the desired state of AWSSnowMachine.
// Below struct is a full copy of AWSSnowMachineSpec. Fields not needed for now are commented out, with a TODO added
// in comments.
type AWSSnowMachineSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// ProviderID is the unique identifier as specified by the cloud provider.
	ProviderID *string `json:"providerID,omitempty"`

	// InstanceID is the EC2 instance ID for this machine.
	InstanceID *string `json:"instanceID,omitempty"`

	// AMI is the reference to the AMI from which to create the machine instance.
	AMI AWSResourceReference `json:"ami,omitempty"`

	// ImageLookupFormat is the AMI naming format to look up the image for this
	// machine. It will be ignored if an explicit AMI is set. Supports
	// substitutions for {{.BaseOS}} and {{.K8sVersion}} with the base OS and
	// kubernetes version, respectively. The BaseOS will be the value in
	// ImageLookupBaseOS or ubuntu (the default), and the kubernetes version as
	// defined by the packages produced by kubernetes/release without v as a
	// prefix: 1.13.0, 1.12.5-mybuild.1, or 1.17.3. For example, the default
	// image format of capas-ami-{{.BaseOS}}-.?{{.K8sVersion}}-* will end up
	// searching for AMIs that match the pattern capa-ami-ubuntu-?1.18.0-* for a
	// Machine that is targeting kubernetes v1.18.0 and the ubuntu base OS. See
	// also: https://golang.org/pkg/text/template/
	// +optional
	ImageLookupFormat string `json:"imageLookupFormat,omitempty"`

	// ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
	ImageLookupOrg string `json:"imageLookupOrg,omitempty"`

	// ImageLookupBaseOS is the name of the base operating system to use for
	// image lookup if AMI is not set.
	ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`

	// InstanceType is the type of instance to create. Example: m4.xlarge
	InstanceType string `json:"instanceType,omitempty"`

	// AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
	// AWS provider. If both the AWSSnowCluster and the AWSSnowMachine specify the same tag name with different values, the
	// AWSSnowMachine's value takes precedence.
	// TODO: Evaluate the need or remove completely.
	// +optional
	// AdditionalTags Tags `json:"additionalTags,omitempty"`

	// IAMInstanceProfile is a name of an IAM instance profile to assign to the instance.
	// +optional
	IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"`

	// PublicIP specifies whether the instance should get a public IP.
	// Precedence for this setting is as follows:
	// 1. This field if set
	// 2. Cluster/flavor setting
	// 3. Subnet default
	// +optional
	PublicIP *bool `json:"publicIP,omitempty"`

	// AdditionalSecurityGroups is an array of references to security groups that should be applied to the
	// instance. These security groups would be set in addition to any security groups defined
	// at the cluster level or in the actuator. It is possible to specify either IDs or Filters. Using Filters
	// will cause additional requests to AWS API and if tags change the attached security groups might change too.
	// +optional
	AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"`

	// FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
	// For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
	// If multiple subnets are matched for the availability zone, the first one returned is picked.
	FailureDomain *string `json:"failureDomain,omitempty"`

	// Subnet is a reference to the subnet to use for this instance. If not specified,
	// the cluster subnet will be used.
	// +optional
	Subnet *AWSResourceReference `json:"subnet,omitempty"`

	// SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string
	// (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name).
	// +optional
	SSHKeyName *string `json:"sshKeyName,omitempty"`

	// RootVolume encapsulates the configuration options for the root volume.
	// TODO: Evaluate the need or remove completely.
	// +optional
	RootVolume *Volume `json:"rootVolume,omitempty"`

	// NonRootVolumes holds configuration options for the non root storage volumes.
	// TODO: Evaluate the need or remove completely.
	// +optional
	NonRootVolumes []*Volume `json:"nonRootVolumes,omitempty"`

	// ContainersVolume holds configuration options for the containers data storage volume.
	// +optional
	ContainersVolume *Volume `json:"containersVolume,omitempty"`

	// NetworkInterfaces is a list of ENIs to associate with the instance.
	// A maximum of 2 may be specified.
	// +optional
	// +kubebuilder:validation:MaxItems=2
	NetworkInterfaces []string `json:"networkInterfaces,omitempty"`

	// UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the ec2 instance.
	// cloud-init has built-in support for gzip-compressed user data;
	// user data stored in aws secret manager is always gzip-compressed.
	// +optional
	UncompressedUserData *bool `json:"uncompressedUserData,omitempty"`

	// CloudInit defines options related to the bootstrapping systems where
	// CloudInit is used.
	// +optional
	CloudInit CloudInit `json:"cloudInit,omitempty"`

	// PhysicalNetworkConnectorType is the physical network connector type to use for creating direct network
	// interfaces. Valid values are a physical network connector type (SFP_PLUS or QSFP), or omitted
	// (cluster-api selects a valid physical network interface, default is SFP_PLUS).
	// +optional
	// +kubebuilder:validation:Enum:=SFP_PLUS;QSFP;RJ45
	PhysicalNetworkConnectorType *string `json:"physicalNetworkConnectorType,omitempty"`

	// Devices is a list of device ips, assigned by the customer, on which machines are provisioned.
	// +kubebuilder:validation:MinItems=1
	Devices []string `json:"devices,omitempty"`

	// OSFamily is the OS flavor which is used as the node instance OS. It currently supports ubuntu and bottlerocket.
	// +kubebuilder:validation:Enum:=ubuntu;bottlerocket
	// +optional
	OSFamily *OSFamily `json:"osFamily,omitempty"`

	// Network is the DNI and ip address settings for this machine.
	// +kubebuilder:validation:Required
	Network AWSSnowNetwork `json:"network"`

	// PrimaryPublicIP is the primary public ip of the machine.
	PrimaryPublicIP *string `json:"primaryPublicIP,omitempty"`

	// SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
	// TODO: Evaluate the need or remove completely.
	// +optional
	// SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`

	// Tenancy indicates if instance should run on shared or single-tenant hardware.
	// +optional
	// TODO: Evaluate the need or remove completely.
	// +kubebuilder:validation:Enum:=default;dedicated;host
	// Tenancy string `json:"tenancy,omitempty"`
}

// AWSSnowNetwork is the network configuration, including DNIs. More can be added in the future if needed.
type AWSSnowNetwork struct {
	// DirectNetworkInterfaces is the list of DNI configurations requested by the customer.
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:MinItems=1
	// +kubebuilder:validation:MaxItems=8
	DirectNetworkInterfaces []AWSSnowDirectNetworkInterface `json:"directNetworkInterfaces"`
}

// AWSSnowDirectNetworkInterface is the configuration of a DNI specified by customers.
type AWSSnowDirectNetworkInterface struct {
	// Index is the index number of the DNI. It usually starts from 1, which disambiguates DNIs in the list.
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=8
	// +optional
	Index int `json:"index,omitempty"`

	// VlanID is the vlan ID assigned by the user to this DNI.
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=4095
	// +optional
	VlanID *int32 `json:"vlanID,omitempty"`

	// DHCP indicates whether the ip for this DNI is assigned using DHCP.
	// If DHCP is true, CAPAS will allocate an ip address to this DNI using DHCP.
	// +optional
	DHCP bool `json:"dhcp,omitempty"`

	// IPPool is an ip pool which provides a range of ip addresses.
	// If IPPool is not nil, an ip address from this pool is allocated to this DNI.
	// +optional
	IPPool *corev1.ObjectReference `json:"ipPool,omitempty"`

	// Primary indicates whether the DNI is primary or not.
	// +optional
	Primary bool `json:"primary,omitempty"`
}

// CloudInit defines options related to the bootstrapping systems where
// CloudInit is used.
// TODO: Right now, this is a full copy of awsmachine_types.go in cluster-api-provider-aws.
// We will likely need to trim this down since we don't have secret store supported on
// Snow yet.
type CloudInit struct {
	// InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
	// or AWS Systems Manager Parameter Store to ensure privacy of userdata.
	// By default, a cloud-init boothook shell script is prepended to download
	// the userdata from Secrets Manager and additionally delete the secret.
	InsecureSkipSecretsManager bool `json:"insecureSkipSecretsManager,omitempty"`

	// SecretCount is the number of secrets used to form the complete secret.
	// +optional
	SecretCount int32 `json:"secretCount,omitempty"`

	// SecretPrefix is the prefix for the secret name. This is stored
	// temporarily, and deleted when the machine registers as a node against
	// the workload cluster.
	// +optional
	SecretPrefix string `json:"secretPrefix,omitempty"`

	// SecureSecretsBackend, when set to parameter-store will utilize the AWS Systems Manager
	// Parameter Storage to distribute secrets. By default or with the value of secrets-manager,
	// will use AWS Secrets Manager instead.
	// TODO: We don't have one for now. Keep it for future reference. Need to delete.
	// +optional
	// +kubebuilder:validation:Enum=secrets-manager;ssm-parameter-store
	// SecureSecretsBackend SecretBackend `json:"secureSecretsBackend,omitempty"`
}

// AWSSnowMachineStatus defines the observed state of AWSSnowMachine.
type AWSSnowMachineStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Ready is true when the provider resource is ready.
	// +optional
	Ready bool `json:"ready"`

	// Addresses contains the AWS instance associated addresses.
	Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`

	// InstanceState is the state of the AWS instance for this machine.
	// +optional
	InstanceState *InstanceState `json:"instanceState,omitempty"`

	// FailureReason will be set in the event that there is a terminal problem
	// reconciling the Machine and will contain a succinct value suitable
	// for machine interpretation.
	//
	// This field should not be set for transitive errors that a controller
	// faces that are expected to be fixed automatically over
	// time (like service outages), but instead indicate that something is
	// fundamentally wrong with the Machine's spec or the configuration of
	// the controller, and that manual intervention is required. Examples
	// of terminal errors would be invalid combinations of settings in the
	// spec, values that are unsupported by the controller, or the
	// responsible controller itself being critically misconfigured.
	//
	// Any transient errors that occur during the reconciliation of Machines
	// can be added as events to the Machine object and/or logged in the
	// controller's output.
	// +optional
	FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`

	// FailureMessage will be set in the event that there is a terminal problem
	// reconciling the Machine and will contain a more verbose string suitable
	// for logging and human consumption.
	//
	// This field should not be set for transitive errors that a controller
	// faces that are expected to be fixed automatically over
	// time (like service outages), but instead indicate that something is
	// fundamentally wrong with the Machine's spec or the configuration of
	// the controller, and that manual intervention is required. Examples
	// of terminal errors would be invalid combinations of settings in the
	// spec, values that are unsupported by the controller, or the
	// responsible controller itself being critically misconfigured.
	//
	// Any transient errors that occur during the reconciliation of Machines
	// can be added as events to the Machine object and/or logged in the
	// controller's output.
	// +optional
	FailureMessage *string `json:"failureMessage,omitempty"`

	// Conditions defines current service state of the AWSSnowMachine.
	// +optional
	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// AWSSnowMachine is the Schema for the awssnowmachines API.
type AWSSnowMachine struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	DeviceIP          string `json:"deviceIP,omitempty"`

	Spec   AWSSnowMachineSpec   `json:"spec,omitempty"`
	Status AWSSnowMachineStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// AWSSnowMachineList contains a list of AWSSnowMachine.
type AWSSnowMachineList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []AWSSnowMachine `json:"items"`
}

// GetConditions returns the conditions from the AWSSnowMachine status.
func (r *AWSSnowMachine) GetConditions() clusterv1.Conditions {
	return r.Status.Conditions
}

// SetConditions sets the conditions on the AWSSnowMachine status.
func (r *AWSSnowMachine) SetConditions(conditions clusterv1.Conditions) {
	r.Status.Conditions = conditions
}

// IsControlPlane reports whether this machine carries the CAPI control plane label.
func (r *AWSSnowMachine) IsControlPlane() bool {
	_, keyExists := r.ObjectMeta.Labels[clusterv1.MachineControlPlaneNameLabel]
	return keyExists
}

// IsEtcd reports whether this machine carries the etcd cluster label.
func (r *AWSSnowMachine) IsEtcd() bool {
	_, keyExists := r.ObjectMeta.Labels[MachineEtcdLabelName]
	return keyExists
}

func init() {
	SchemeBuilder.Register(&AWSSnowMachine{}, &AWSSnowMachineList{})
}
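// Example (illustrative sketch, not part of the upstream file): a minimal
// AWSSnowMachineSpec with a single DHCP-backed primary DNI. The instance type
// and device ip are illustrative values only.
func exampleAWSSnowMachineSpec() AWSSnowMachineSpec {
	osFamily := Ubuntu
	return AWSSnowMachineSpec{
		InstanceType: "sbe-c.xlarge",
		Devices:      []string{"192.168.1.100"},
		OSFamily:     &osFamily,
		Network: AWSSnowNetwork{
			DirectNetworkInterfaces: []AWSSnowDirectNetworkInterface{{
				Index:   1,
				DHCP:    true,
				Primary: true,
			}},
		},
	}
}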
350
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

const (
	// PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully.
	// A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role.
	PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved"
	// PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval.
	PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed"
	// CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval.
	CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed"
	// PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace.
	PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed"
	// PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list.
	PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized"
	// SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces
	// and allowed namespaces of the identities that the source identity depends on.
	SourcePrincipalUsageUnauthorizedReason = "SourcePrincipalUsageUnauthorized"
)

const (
	// VpcReadyCondition reports on the successful reconciliation of a VPC.
	VpcReadyCondition clusterv1.ConditionType = "VpcReady"
	// VpcCreationStartedReason used when attempting to create a VPC for a managed cluster.
	// Will not be applied to unmanaged clusters.
	VpcCreationStartedReason = "VpcCreationStarted"
	// VpcReconciliationFailedReason used when errors occur during VPC reconciliation.
	VpcReconciliationFailedReason = "VpcReconciliationFailed"
)

const (
	// SubnetsReadyCondition reports on the successful reconciliation of subnets.
	SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady"
	// SubnetsReconciliationFailedReason used to report failures while reconciling subnets.
	SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed"
)

const (
	// InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways.
	// Only applicable to managed clusters.
	InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady"
	// InternetGatewayFailedReason used when errors occur during internet gateway reconciliation.
	InternetGatewayFailedReason = "InternetGatewayFailed"
)

const (
	// NatGatewaysReadyCondition reports successful reconciliation of NAT gateways.
	// Only applicable to managed clusters.
	NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady"
	// NatGatewaysCreationStartedReason set once when creating new NAT gateways.
	NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted"
	// NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways.
	NatGatewaysReconciliationFailedReason = "NatGatewaysReconciliationFailed"
)

const (
	// RouteTablesReadyCondition reports successful reconciliation of route tables.
	// Only applicable to managed clusters.
	RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady"
	// RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables.
	RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed"
)

const (
	// SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks.
	// Only applicable to managed clusters.
	SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady"
	// SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks.
	SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed"
)

const (
	// ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups.
	ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady"
	// ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups.
	ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed"
)

const (
	// BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster
	// may not require a bastion host and this condition will be skipped.
	BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady"
	// BastionCreationStartedReason used when creating a new bastion host.
	BastionCreationStartedReason = "BastionCreationStarted"
	// BastionHostFailedReason used when an error occurs during the creation of a bastion host.
	BastionHostFailedReason = "BastionHostFailed"
)

const (
	// LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled.
	LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady"
	// WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated.
	WaitForDNSNameReason = "WaitForDNSName"
	// WaitForDNSNameResolveReason used while waiting for DNS name to resolve.
	WaitForDNSNameResolveReason = "WaitForDNSNameResolve"
	// LoadBalancerFailedReason used when an error occurs during load balancer reconciliation.
	LoadBalancerFailedReason = "LoadBalancerFailed"
)

const (
	// InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state.
	InstanceReadyCondition clusterv1.ConditionType = "InstanceReady"
	// InstanceNotFoundReason used when the instance couldn't be retrieved.
	InstanceNotFoundReason = "InstanceNotFound"
	// InstanceTerminatedReason instance is in a terminated state.
	InstanceTerminatedReason = "InstanceTerminated"
	// InstanceStoppedReason instance is in a stopped state.
	InstanceStoppedReason = "InstanceStopped"
	// InstanceNotReadyReason used when the instance is in a pending state.
	InstanceNotReadyReason = "InstanceNotReady"
	// InstanceProvisionStartedReason set when the provisioning of an instance started.
	InstanceProvisionStartedReason = "InstanceProvisionStarted"
	// InstanceProvisionFailedReason used for failures during instance provisioning.
	InstanceProvisionFailedReason = "InstanceProvisionFailed"
	// WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding.
	WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure"
	// WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding.
	WaitingForBootstrapDataReason = "WaitingForBootstrapData"
)

// TODO add Snowball Device conditions, e.g., direct-network-interface ready condition

const (
	// SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine.
	SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady"
	// SecurityGroupsFailedReason used when the security groups could not be synced.
	SecurityGroupsFailedReason = "SecurityGroupsSyncFailed"
)

const (
	// ELBAttachedCondition will report true when a control plane is successfully registered with an ELB.
	// When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ.
	// Note this is only applicable to control plane machines.
	ELBAttachedCondition clusterv1.ConditionType = "ELBAttached"
	// ELBAttachFailedReason used when a control plane node fails to attach to the ELB.
	ELBAttachFailedReason = "ELBAttachFailed"
	// ELBDetachFailedReason used when a control plane node fails to detach from an ELB.
	ELBDetachFailedReason = "ELBDetachFailed"
)

const (
	// Bottlerocket is the Bottlerocket OS family.
	Bottlerocket OSFamily = "bottlerocket"
	// Ubuntu is the Ubuntu OS family.
	Ubuntu OSFamily = "ubuntu"
)
166
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1beta1 contains API Schema definitions for the infrastructure v1beta1 API group
// +kubebuilder:object:generate=true
// +groupName=infrastructure.cluster.x-k8s.io
package snow

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
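// Example (illustrative sketch, not part of the upstream file): registering
// this API group's types into a runtime.Scheme before building a client, using
// the AddToScheme variable defined above:
//
//	s := runtime.NewScheme()
//	if err := AddToScheme(s); err != nil {
//		// handle scheme registration error
//	}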
38
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import (
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/types"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// Tags defines a map of tags.
type Tags map[string]string

// Equals returns true if the tags are equal.
func (t Tags) Equals(other Tags) bool {
	return reflect.DeepEqual(t, other)
}

// HasOwned returns true if the tags contains a tag that marks the resource as owned by the cluster from the perspective of this management tooling.
func (t Tags) HasOwned(cluster string) bool {
	value, ok := t[ClusterTagKey(cluster)]
	return ok && ResourceLifecycle(value) == ResourceLifecycleOwned
}

// HasAWSCloudProviderOwned returns true if the tags contains a tag that marks the resource as owned by the cluster from the perspective of the in-tree cloud provider.
func (t Tags) HasAWSCloudProviderOwned(cluster string) bool {
	value, ok := t[ClusterAWSCloudProviderTagKey(cluster)]
	return ok && ResourceLifecycle(value) == ResourceLifecycleOwned
}

// GetRole returns the Cluster API role for the tagged resource.
func (t Tags) GetRole() string {
	return t[NameAWSClusterAPIRole]
}

// Difference returns the difference between this map of tags and the other map of tags.
// Items are considered equal if both key and value are equal.
func (t Tags) Difference(other Tags) Tags {
	res := make(Tags, len(t))

	for key, value := range t {
		if otherValue, ok := other[key]; ok && value == otherValue {
			continue
		}
		res[key] = value
	}

	return res
}

// Merge merges in tags from other. If a tag already exists, it is replaced by the tag in other.
func (t Tags) Merge(other Tags) {
	for k, v := range other {
		t[k] = v
	}
}

// ResourceLifecycle configures the lifecycle of a resource.
type ResourceLifecycle string

const (
	// ResourceLifecycleOwned is the value we use when tagging resources to indicate
	// that the resource is considered owned and managed by the cluster,
	// and in particular that the lifecycle is tied to the lifecycle of the cluster.
	ResourceLifecycleOwned = ResourceLifecycle("owned")

	// ResourceLifecycleShared is the value we use when tagging resources to indicate
	// that the resource is shared between multiple clusters, and should not be destroyed
	// if the cluster is destroyed.
	ResourceLifecycleShared = ResourceLifecycle("shared")

	// NameKubernetesAWSCloudProviderPrefix is the tag name used by the cloud provider to logically
	// separate independent cluster resources. We use it to identify which resources we expect
	// to be permissive about state changes between logically independent clusters running in the same AZ.
	// The tag key = NameKubernetesAWSCloudProviderPrefix + clusterID
	// The tag value is an ownership value.
	NameKubernetesAWSCloudProviderPrefix = "kubernetes.io/cluster/"

	// NameAWSProviderPrefix is the tag prefix we use to differentiate
	// cluster-api-provider-aws owned components from other tooling that
	// uses NameKubernetesClusterPrefix.
	NameAWSProviderPrefix = "sigs.k8s.io/cluster-api-provider-aws-snow/"

	// NameAWSProviderOwned is the tag name we use to differentiate
	// cluster-api-provider-aws owned components from other tooling that
	// uses NameKubernetesClusterPrefix.
	NameAWSProviderOwned = NameAWSProviderPrefix + "cluster/"

	// NameAWSClusterAPIRole is the tag name we use to mark roles for resources
	// dedicated to this cluster api provider implementation.
	NameAWSClusterAPIRole = NameAWSProviderPrefix + "role"

	// NameAWSSubnetAssociation is the tag name we use to mark a subnet association.
	NameAWSSubnetAssociation = NameAWSProviderPrefix + "association"

	// SecondarySubnetTagValue is the subnet association value for secondary subnets.
	SecondarySubnetTagValue = "secondary"

	// APIServerRoleTagValue describes the value for the apiserver role.
	APIServerRoleTagValue = "apiserver"

	// BastionRoleTagValue describes the value for the bastion role.
	BastionRoleTagValue = "bastion"

	// CommonRoleTagValue describes the value for the common role.
	CommonRoleTagValue = "common"

	// PublicRoleTagValue describes the value for the public role.
	PublicRoleTagValue = "public"

	// PrivateRoleTagValue describes the value for the private role.
	PrivateRoleTagValue = "private"

	// MachineNameTagKey is the key for the machine name tag.
	MachineNameTagKey = "MachineName"
)

// ClusterTagKey generates the key for resources associated with a cluster.
func ClusterTagKey(name string) string {
	return fmt.Sprintf("%s%s", NameAWSProviderOwned, name)
}

// ClusterAWSCloudProviderTagKey generates the key for resources associated with a cluster's AWS cloud provider.
func ClusterAWSCloudProviderTagKey(name string) string {
	return fmt.Sprintf("%s%s", NameKubernetesAWSCloudProviderPrefix, name)
}

// BuildParams is used to build tags around an aws resource.
type BuildParams struct {
	// Lifecycle determines the resource lifecycle.
	Lifecycle ResourceLifecycle

	// ClusterName is the cluster associated with the resource.
	ClusterName string

	// ResourceID is the unique identifier of the resource to be tagged.
	ResourceID string

	// Name is the name of the resource, it's applied as the tag "Name" on AWS.
	// +optional
	Name *string

	// Role is the role associated to the resource.
	// +optional
	Role *string

	// Additional holds any additional tags to be added to the resource.
	// +optional
	Additional Tags
}

// WithMachineName tags the namespaced machine name.
// The machine name will be tagged with the key "MachineName".
func (b BuildParams) WithMachineName(m *clusterv1.Machine) BuildParams {
	machineNamespacedName := types.NamespacedName{Namespace: m.Namespace, Name: m.Name}
	b.Additional[MachineNameTagKey] = machineNamespacedName.String()
	return b
}

// WithCloudProvider tags the cluster ownership for a resource.
func (b BuildParams) WithCloudProvider(name string) BuildParams {
	b.Additional[ClusterAWSCloudProviderTagKey(name)] = string(ResourceLifecycleOwned)
	return b
}

// Build builds tags including the cluster tag and returns them in map form.
func Build(params BuildParams) Tags {
	tags := make(Tags)
	for k, v := range params.Additional {
		tags[k] = v
	}

	tags[ClusterTagKey(params.ClusterName)] = string(params.Lifecycle)
	if params.Role != nil {
		tags[NameAWSClusterAPIRole] = *params.Role
	}

	if params.Name != nil {
		tags["Name"] = *params.Name
	}

	return tags
}
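// Example (illustrative sketch, not part of the upstream file): building tags
// for a cluster-owned resource with Build above. The cluster name, resource
// name, and "env" tag are illustrative values only.
func exampleBuildTags() Tags {
	name, role := "snow-test-cp", APIServerRoleTagValue
	// The result contains the cluster ownership tag
	// (sigs.k8s.io/cluster-api-provider-aws-snow/cluster/snow-test = "owned"),
	// the role tag, the "Name" tag, and the additional "env" tag.
	return Build(BuildParams{
		ClusterName: "snow-test",
		Lifecycle:   ResourceLifecycleOwned,
		Name:        &name,
		Role:        &role,
		Additional:  Tags{"env": "test"},
	})
}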
199
eks-anywhere
aws
Go
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package snow

import (
	"k8s.io/apimachinery/pkg/util/sets"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
// a validation error.
type AWSResourceReference struct {
	// ID of resource
	// +optional
	ID *string `json:"id,omitempty"`

	// ARN of resource
	// +optional
	ARN *string `json:"arn,omitempty"`

	// Filters is a set of key/value pairs used to identify a resource.
	// They are applied according to the rules defined by the AWS API:
	// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html
	// +optional
	Filters []Filter `json:"filters,omitempty"`
}

// Filter is a filter used to identify an AWS resource.
type Filter struct {
	// Name of the filter. Filter names are case-sensitive.
	Name string `json:"name"`

	// Values includes one or more filter values. Filter values are case-sensitive.
	Values []string `json:"values"`
}

// Instance describes an AWS instance.
type Instance struct {
	ID string `json:"id"`

	// State is the current state of the instance.
	State InstanceState `json:"instanceState,omitempty"`

	// Type is the instance type.
	Type string `json:"type,omitempty"`

	// SubnetID is the ID of the subnet of the instance.
	SubnetID string `json:"subnetId,omitempty"`

	// ImageID is the ID of the AMI used to launch the instance.
	ImageID string `json:"imageId,omitempty"`

	// SSHKeyName is the name of the SSH key pair.
	SSHKeyName *string `json:"sshKeyName,omitempty"`

	// SecurityGroupIDs are one or more security group IDs this instance belongs to.
	SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`

	// UserData is the raw data script passed to the instance which is run upon bootstrap.
	// This field must not be base64 encoded and should only be used when running a new instance.
	UserData *string `json:"userData,omitempty"`

	// IAMProfile is the name of the IAM instance profile associated with the instance, if applicable.
	IAMProfile string `json:"iamProfile,omitempty"`

	// Addresses contains the AWS instance associated addresses.
	Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`

	// PrivateIP is the private IPv4 address assigned to the instance.
	PrivateIP *string `json:"privateIp,omitempty"`

	// PublicIP is the public IPv4 address assigned to the instance, if applicable.
	PublicIP *string `json:"publicIp,omitempty"`

	// ENASupport specifies whether enhanced networking with ENA is enabled.
	ENASupport *bool `json:"enaSupport,omitempty"`

	// EBSOptimized indicates whether the instance is optimized for Amazon EBS I/O.
	EBSOptimized *bool `json:"ebsOptimized,omitempty"`

	// RootVolume holds configuration options for the root storage volume.
	// +optional
	RootVolume *Volume `json:"rootVolume,omitempty"`

	// NonRootVolumes holds configuration options for the non root storage volumes.
	// +optional
	NonRootVolumes []*Volume `json:"nonRootVolumes,omitempty"`

	// ContainersVolume holds configuration options for the containers data volume.
	// +optional
	ContainersVolume *Volume `json:"containersVolume,omitempty"`

	// NetworkInterfaces specifies ENIs attached to the instance.
	NetworkInterfaces []string `json:"networkInterfaces,omitempty"`

	// Tags are the tags associated with the instance.
	Tags map[string]string `json:"tags,omitempty"`

	// AvailabilityZone is the availability zone of the instance.
	AvailabilityZone string `json:"availabilityZone,omitempty"`

	// SpotMarketOptions are options for configuring instances to be run using AWS Spot instances.
	// SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`

	// Tenancy indicates if the instance should run on shared or single-tenant hardware.
	// +optional
	Tenancy string `json:"tenancy,omitempty"`
}

// Volume encapsulates the configuration options for the storage device.
// TODO: Trim the fields that do not apply for Snow.
type Volume struct {
	// DeviceName is the device name.
	// +optional
	DeviceName string `json:"deviceName,omitempty"`

	// Size specifies the size (in Gi) of the storage device.
	// Must be greater than the image snapshot size or 8 (whichever is greater).
	// +kubebuilder:validation:Minimum=8
	Size int64 `json:"size"`

	// Type is the type of the volume (sbp1 for capacity-optimized HDD, sbg1 for performance-optimized SSD; default is sbp1).
	// +optional
	// +kubebuilder:validation:Enum:=sbp1;sbg1
	Type string `json:"type,omitempty"`
}

// InstanceState describes the state of an AWS instance.
type InstanceState string

var (
	// InstanceStatePending is the string representing an instance in a pending state.
	InstanceStatePending = InstanceState("pending")

	// InstanceStateRunning is the string representing an instance in a running state.
	InstanceStateRunning = InstanceState("running")

	// InstanceStateShuttingDown is the string representing an instance shutting down.
	InstanceStateShuttingDown = InstanceState("shutting-down")

	// InstanceStateTerminated is the string representing an instance that has been terminated.
	InstanceStateTerminated = InstanceState("terminated")

	// InstanceStateStopping is the string representing an instance
	// that is in the process of being stopped and can be restarted.
	InstanceStateStopping = InstanceState("stopping")

	// InstanceStateStopped is the string representing an instance
	// that has been stopped and can be restarted.
	InstanceStateStopped = InstanceState("stopped")

	// InstanceRunningStates defines the set of states in which an EC2 instance is
	// running or going to be running soon.
	InstanceRunningStates = sets.NewString(
		string(InstanceStatePending),
		string(InstanceStateRunning),
	)

	// InstanceOperationalStates defines the set of states in which an EC2 instance is
	// or can return to running, and supports all EC2 operations.
	InstanceOperationalStates = InstanceRunningStates.Union(
		sets.NewString(
			string(InstanceStateStopping),
			string(InstanceStateStopped),
		),
	)

	// InstanceKnownStates represents all known EC2 instance states.
	InstanceKnownStates = InstanceOperationalStates.Union(
		sets.NewString(
			string(InstanceStateShuttingDown),
			string(InstanceStateTerminated),
		),
	)
)

// AWSSnowMachineTemplateResource describes the data needed to create an AWSSnowMachine from a template.
type AWSSnowMachineTemplateResource struct {
	// Spec is the specification of the desired behavior of the machine.
	Spec AWSSnowMachineSpec `json:"spec"`
}

// OSFamily is the operating system family of the node instance image.
type OSFamily string
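// Example (illustrative sketch, not part of the upstream file): consulting the
// state sets above to decide whether an instance can still return to running.
// The helper name is hypothetical; the sets and their semantics come from the file.
func instanceIsOperational(state InstanceState) bool {
	return InstanceOperationalStates.Has(string(state))
}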
199
eks-anywhere
aws
Go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by controller-gen. DO NOT EDIT.

package snow

import (
	"k8s.io/api/core/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/errors"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) {
	*out = *in
	if in.ID != nil {
		in, out := &in.ID, &out.ID
		*out = new(string)
		**out = **in
	}
	if in.ARN != nil {
		in, out := &in.ARN, &out.ARN
		*out = new(string)
		**out = **in
	}
	if in.Filters != nil {
		in, out := &in.Filters, &out.Filters
		*out = make([]Filter, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference.
func (in *AWSResourceReference) DeepCopy() *AWSResourceReference {
	if in == nil {
		return nil
	}
	out := new(AWSResourceReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowCluster) DeepCopyInto(out *AWSSnowCluster) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowCluster.
func (in *AWSSnowCluster) DeepCopy() *AWSSnowCluster {
	if in == nil {
		return nil
	}
	out := new(AWSSnowCluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AWSSnowCluster) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowClusterList) DeepCopyInto(out *AWSSnowClusterList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]AWSSnowCluster, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowClusterList.
func (in *AWSSnowClusterList) DeepCopy() *AWSSnowClusterList {
	if in == nil {
		return nil
	}
	out := new(AWSSnowClusterList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AWSSnowClusterList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowClusterSpec) DeepCopyInto(out *AWSSnowClusterSpec) {
	*out = *in
	if in.SSHKeyName != nil {
		in, out := &in.SSHKeyName, &out.SSHKeyName
		*out = new(string)
		**out = **in
	}
	out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
	if in.PhysicalNetworkConnectorType != nil {
		in, out := &in.PhysicalNetworkConnectorType, &out.PhysicalNetworkConnectorType
		*out = new(string)
		**out = **in
	}
	if in.IdentityRef != nil {
		in, out := &in.IdentityRef, &out.IdentityRef
		*out = new(AWSSnowIdentityReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowClusterSpec.
func (in *AWSSnowClusterSpec) DeepCopy() *AWSSnowClusterSpec {
	if in == nil {
		return nil
	}
	out := new(AWSSnowClusterSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowClusterStatus) DeepCopyInto(out *AWSSnowClusterStatus) {
	*out = *in
	if in.FailureDomains != nil {
		in, out := &in.FailureDomains, &out.FailureDomains
		*out = make(v1beta1.FailureDomains, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make(v1beta1.Conditions, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowClusterStatus.
func (in *AWSSnowClusterStatus) DeepCopy() *AWSSnowClusterStatus {
	if in == nil {
		return nil
	}
	out := new(AWSSnowClusterStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowDirectNetworkInterface) DeepCopyInto(out *AWSSnowDirectNetworkInterface) {
	*out = *in
	if in.VlanID != nil {
		in, out := &in.VlanID, &out.VlanID
		*out = new(int32)
		**out = **in
	}
	if in.IPPool != nil {
		in, out := &in.IPPool, &out.IPPool
		*out = new(v1.ObjectReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowDirectNetworkInterface.
func (in *AWSSnowDirectNetworkInterface) DeepCopy() *AWSSnowDirectNetworkInterface {
	if in == nil {
		return nil
	}
	out := new(AWSSnowDirectNetworkInterface)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowIPPool) DeepCopyInto(out *AWSSnowIPPool) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowIPPool.
func (in *AWSSnowIPPool) DeepCopy() *AWSSnowIPPool {
	if in == nil {
		return nil
	}
	out := new(AWSSnowIPPool)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AWSSnowIPPool) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowIPPoolList) DeepCopyInto(out *AWSSnowIPPoolList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]AWSSnowIPPool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowIPPoolList.
func (in *AWSSnowIPPoolList) DeepCopy() *AWSSnowIPPoolList {
	if in == nil {
		return nil
	}
	out := new(AWSSnowIPPoolList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AWSSnowIPPoolList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowIPPoolSpec) DeepCopyInto(out *AWSSnowIPPoolSpec) {
	*out = *in
	if in.IPPools != nil {
		in, out := &in.IPPools, &out.IPPools
		*out = make([]IPPool, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowIPPoolSpec.
func (in *AWSSnowIPPoolSpec) DeepCopy() *AWSSnowIPPoolSpec {
	if in == nil {
		return nil
	}
	out := new(AWSSnowIPPoolSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowIPPoolStatus) DeepCopyInto(out *AWSSnowIPPoolStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowIPPoolStatus.
func (in *AWSSnowIPPoolStatus) DeepCopy() *AWSSnowIPPoolStatus {
	if in == nil {
		return nil
	}
	out := new(AWSSnowIPPoolStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowIdentityReference) DeepCopyInto(out *AWSSnowIdentityReference) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowIdentityReference.
func (in *AWSSnowIdentityReference) DeepCopy() *AWSSnowIdentityReference {
	if in == nil {
		return nil
	}
	out := new(AWSSnowIdentityReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowMachine) DeepCopyInto(out *AWSSnowMachine) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachine.
func (in *AWSSnowMachine) DeepCopy() *AWSSnowMachine {
	if in == nil {
		return nil
	}
	out := new(AWSSnowMachine)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AWSSnowMachine) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowMachineList) DeepCopyInto(out *AWSSnowMachineList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]AWSSnowMachine, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineList.
func (in *AWSSnowMachineList) DeepCopy() *AWSSnowMachineList {
	if in == nil {
		return nil
	}
	out := new(AWSSnowMachineList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AWSSnowMachineList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowMachineSpec) DeepCopyInto(out *AWSSnowMachineSpec) {
	*out = *in
	if in.ProviderID != nil {
		in, out := &in.ProviderID, &out.ProviderID
		*out = new(string)
		**out = **in
	}
	if in.InstanceID != nil {
		in, out := &in.InstanceID, &out.InstanceID
		*out = new(string)
		**out = **in
	}
	in.AMI.DeepCopyInto(&out.AMI)
	if in.PublicIP != nil {
		in, out := &in.PublicIP, &out.PublicIP
		*out = new(bool)
		**out = **in
	}
	if in.AdditionalSecurityGroups != nil {
		in, out := &in.AdditionalSecurityGroups, &out.AdditionalSecurityGroups
		*out = make([]AWSResourceReference, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FailureDomain != nil {
		in, out := &in.FailureDomain, &out.FailureDomain
		*out = new(string)
		**out = **in
	}
	if in.Subnet != nil {
		in, out := &in.Subnet, &out.Subnet
		*out = new(AWSResourceReference)
		(*in).DeepCopyInto(*out)
	}
	if in.SSHKeyName != nil {
		in, out := &in.SSHKeyName, &out.SSHKeyName
		*out = new(string)
		**out = **in
	}
	if in.RootVolume != nil {
		in, out := &in.RootVolume, &out.RootVolume
		*out = new(Volume)
		**out = **in
	}
	if in.NonRootVolumes != nil {
		in, out := &in.NonRootVolumes, &out.NonRootVolumes
		*out = make([]*Volume, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(Volume)
				**out = **in
			}
		}
	}
	if in.ContainersVolume != nil {
		in, out := &in.ContainersVolume, &out.ContainersVolume
		*out = new(Volume)
		**out = **in
	}
	if in.NetworkInterfaces != nil {
		in, out := &in.NetworkInterfaces, &out.NetworkInterfaces
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.UncompressedUserData != nil {
		in, out := &in.UncompressedUserData, &out.UncompressedUserData
		*out = new(bool)
		**out = **in
	}
	out.CloudInit = in.CloudInit
	if in.PhysicalNetworkConnectorType != nil {
		in, out := &in.PhysicalNetworkConnectorType, &out.PhysicalNetworkConnectorType
		*out = new(string)
		**out = **in
	}
	if in.Devices != nil {
		in, out := &in.Devices, &out.Devices
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.OSFamily != nil {
		in, out := &in.OSFamily, &out.OSFamily
		*out = new(OSFamily)
		**out = **in
	}
	in.Network.DeepCopyInto(&out.Network)
	if in.PrimaryPublicIP != nil {
		in, out := &in.PrimaryPublicIP, &out.PrimaryPublicIP
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineSpec.
func (in *AWSSnowMachineSpec) DeepCopy() *AWSSnowMachineSpec {
	if in == nil {
		return nil
	}
	out := new(AWSSnowMachineSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AWSSnowMachineStatus) DeepCopyInto(out *AWSSnowMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses *out = make([]v1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.InstanceState != nil { in, out := &in.InstanceState, &out.InstanceState *out = new(InstanceState) **out = **in } if in.FailureReason != nil { in, out := &in.FailureReason, &out.FailureReason *out = new(errors.MachineStatusError) **out = **in } if in.FailureMessage != nil { in, out := &in.FailureMessage, &out.FailureMessage *out = new(string) **out = **in } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make(v1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineStatus. func (in *AWSSnowMachineStatus) DeepCopy() *AWSSnowMachineStatus { if in == nil { return nil } out := new(AWSSnowMachineStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSSnowMachineTemplate) DeepCopyInto(out *AWSSnowMachineTemplate) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineTemplate. func (in *AWSSnowMachineTemplate) DeepCopy() *AWSSnowMachineTemplate { if in == nil { return nil } out := new(AWSSnowMachineTemplate) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *AWSSnowMachineTemplate) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSSnowMachineTemplateList) DeepCopyInto(out *AWSSnowMachineTemplateList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]AWSSnowMachineTemplate, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineTemplateList. func (in *AWSSnowMachineTemplateList) DeepCopy() *AWSSnowMachineTemplateList { if in == nil { return nil } out := new(AWSSnowMachineTemplateList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *AWSSnowMachineTemplateList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSSnowMachineTemplateResource) DeepCopyInto(out *AWSSnowMachineTemplateResource) { *out = *in in.Spec.DeepCopyInto(&out.Spec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineTemplateResource. func (in *AWSSnowMachineTemplateResource) DeepCopy() *AWSSnowMachineTemplateResource { if in == nil { return nil } out := new(AWSSnowMachineTemplateResource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSSnowMachineTemplateSpec) DeepCopyInto(out *AWSSnowMachineTemplateSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowMachineTemplateSpec. func (in *AWSSnowMachineTemplateSpec) DeepCopy() *AWSSnowMachineTemplateSpec { if in == nil { return nil } out := new(AWSSnowMachineTemplateSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSSnowNetwork) DeepCopyInto(out *AWSSnowNetwork) { *out = *in if in.DirectNetworkInterfaces != nil { in, out := &in.DirectNetworkInterfaces, &out.DirectNetworkInterfaces *out = make([]AWSSnowDirectNetworkInterface, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSSnowNetwork. func (in *AWSSnowNetwork) DeepCopy() *AWSSnowNetwork { if in == nil { return nil } out := new(AWSSnowNetwork) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BuildParams) DeepCopyInto(out *BuildParams) { *out = *in if in.Name != nil { in, out := &in.Name, &out.Name *out = new(string) **out = **in } if in.Role != nil { in, out := &in.Role, &out.Role *out = new(string) **out = **in } if in.Additional != nil { in, out := &in.Additional, &out.Additional *out = make(Tags, len(*in)) for key, val := range *in { (*out)[key] = val } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildParams. func (in *BuildParams) DeepCopy() *BuildParams { if in == nil { return nil } out := new(BuildParams) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloudInit) DeepCopyInto(out *CloudInit) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInit. func (in *CloudInit) DeepCopy() *CloudInit { if in == nil { return nil } out := new(CloudInit) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Filter) DeepCopyInto(out *Filter) { *out = *in if in.Values != nil { in, out := &in.Values, &out.Values *out = make([]string, len(*in)) copy(*out, *in) } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. func (in *Filter) DeepCopy() *Filter { if in == nil { return nil } out := new(Filter) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPPool) DeepCopyInto(out *IPPool) { *out = *in if in.IPStart != nil { in, out := &in.IPStart, &out.IPStart *out = new(string) **out = **in } if in.IPEnd != nil { in, out := &in.IPEnd, &out.IPEnd *out = new(string) **out = **in } if in.Subnet != nil { in, out := &in.Subnet, &out.Subnet *out = new(string) **out = **in } if in.Gateway != nil { in, out := &in.Gateway, &out.Gateway *out = new(string) **out = **in } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPool. 
func (in *IPPool) DeepCopy() *IPPool { if in == nil { return nil } out := new(IPPool) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Instance) DeepCopyInto(out *Instance) { *out = *in if in.SSHKeyName != nil { in, out := &in.SSHKeyName, &out.SSHKeyName *out = new(string) **out = **in } if in.SecurityGroupIDs != nil { in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs *out = make([]string, len(*in)) copy(*out, *in) } if in.UserData != nil { in, out := &in.UserData, &out.UserData *out = new(string) **out = **in } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses *out = make([]v1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.PrivateIP != nil { in, out := &in.PrivateIP, &out.PrivateIP *out = new(string) **out = **in } if in.PublicIP != nil { in, out := &in.PublicIP, &out.PublicIP *out = new(string) **out = **in } if in.ENASupport != nil { in, out := &in.ENASupport, &out.ENASupport *out = new(bool) **out = **in } if in.EBSOptimized != nil { in, out := &in.EBSOptimized, &out.EBSOptimized *out = new(bool) **out = **in } if in.RootVolume != nil { in, out := &in.RootVolume, &out.RootVolume *out = new(Volume) **out = **in } if in.NonRootVolumes != nil { in, out := &in.NonRootVolumes, &out.NonRootVolumes *out = make([]*Volume, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] *out = new(Volume) **out = **in } } } if in.ContainersVolume != nil { in, out := &in.ContainersVolume, &out.ContainersVolume *out = new(Volume) **out = **in } if in.NetworkInterfaces != nil { in, out := &in.NetworkInterfaces, &out.NetworkInterfaces *out = make([]string, len(*in)) copy(*out, *in) } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. func (in *Instance) DeepCopy() *Instance { if in == nil { return nil } out := new(Instance) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Tags) DeepCopyInto(out *Tags) { { in := &in *out = make(Tags, len(*in)) for key, val := range *in { (*out)[key] = val } } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tags. func (in Tags) DeepCopy() Tags { if in == nil { return nil } out := new(Tags) in.DeepCopyInto(out) return *out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. func (in *Volume) DeepCopy() *Volume { if in == nil { return nil } out := new(Volume) in.DeepCopyInto(out) return out }
860
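These generated deepcopy helpers are what make it safe to mutate objects fetched from a shared client cache. A minimal sketch of that pattern, assuming the import path used by the reconciler tests later in this document (pkg/providers/snow/api/v1beta1); the main function and strPtr helper are illustrative only:

package main

import (
	"fmt"

	snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1"
)

// strPtr is a small illustrative helper for building *string fields.
func strPtr(s string) *string { return &s }

func main() {
	orig := &snowv1.AWSSnowIPPool{
		Spec: snowv1.AWSSnowIPPoolSpec{
			IPPools: []snowv1.IPPool{{IPStart: strPtr("10.0.0.10")}},
		},
	}

	// DeepCopy duplicates nested slices and pointers, so mutating the copy
	// cannot alias back into orig (or into a cache that still holds it).
	cp := orig.DeepCopy()
	*cp.Spec.IPPools[0].IPStart = "10.0.0.20"

	fmt.Println(*orig.Spec.IPPools[0].IPStart) // still 10.0.0.10
}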
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/snow/aws.go // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" aws "github.com/aws/eks-anywhere/pkg/aws" gomock "github.com/golang/mock/gomock" ) // MockAwsClient is a mock of AwsClient interface. type MockAwsClient struct { ctrl *gomock.Controller recorder *MockAwsClientMockRecorder } // MockAwsClientMockRecorder is the mock recorder for MockAwsClient. type MockAwsClientMockRecorder struct { mock *MockAwsClient } // NewMockAwsClient creates a new mock instance. func NewMockAwsClient(ctrl *gomock.Controller) *MockAwsClient { mock := &MockAwsClient{ctrl: ctrl} mock.recorder = &MockAwsClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockAwsClient) EXPECT() *MockAwsClientMockRecorder { return m.recorder } // EC2ImageExists mocks base method. func (m *MockAwsClient) EC2ImageExists(ctx context.Context, imageID string) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EC2ImageExists", ctx, imageID) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // EC2ImageExists indicates an expected call of EC2ImageExists. func (mr *MockAwsClientMockRecorder) EC2ImageExists(ctx, imageID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EC2ImageExists", reflect.TypeOf((*MockAwsClient)(nil).EC2ImageExists), ctx, imageID) } // EC2ImportKeyPair mocks base method. func (m *MockAwsClient) EC2ImportKeyPair(ctx context.Context, keyName string, keyMaterial []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EC2ImportKeyPair", ctx, keyName, keyMaterial) ret0, _ := ret[0].(error) return ret0 } // EC2ImportKeyPair indicates an expected call of EC2ImportKeyPair. func (mr *MockAwsClientMockRecorder) EC2ImportKeyPair(ctx, keyName, keyMaterial interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EC2ImportKeyPair", reflect.TypeOf((*MockAwsClient)(nil).EC2ImportKeyPair), ctx, keyName, keyMaterial) } // EC2InstanceTypes mocks base method. func (m *MockAwsClient) EC2InstanceTypes(ctx context.Context) ([]aws.EC2InstanceType, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EC2InstanceTypes", ctx) ret0, _ := ret[0].([]aws.EC2InstanceType) ret1, _ := ret[1].(error) return ret0, ret1 } // EC2InstanceTypes indicates an expected call of EC2InstanceTypes. func (mr *MockAwsClientMockRecorder) EC2InstanceTypes(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EC2InstanceTypes", reflect.TypeOf((*MockAwsClient)(nil).EC2InstanceTypes), ctx) } // EC2KeyNameExists mocks base method. func (m *MockAwsClient) EC2KeyNameExists(ctx context.Context, keyName string) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EC2KeyNameExists", ctx, keyName) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // EC2KeyNameExists indicates an expected call of EC2KeyNameExists. func (mr *MockAwsClientMockRecorder) EC2KeyNameExists(ctx, keyName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EC2KeyNameExists", reflect.TypeOf((*MockAwsClient)(nil).EC2KeyNameExists), ctx, keyName) } // IsSnowballDeviceUnlocked mocks base method. 
func (m *MockAwsClient) IsSnowballDeviceUnlocked(ctx context.Context) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "IsSnowballDeviceUnlocked", ctx) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // IsSnowballDeviceUnlocked indicates an expected call of IsSnowballDeviceUnlocked. func (mr *MockAwsClientMockRecorder) IsSnowballDeviceUnlocked(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSnowballDeviceUnlocked", reflect.TypeOf((*MockAwsClient)(nil).IsSnowballDeviceUnlocked), ctx) } // SnowballDeviceSoftwareVersion mocks base method. func (m *MockAwsClient) SnowballDeviceSoftwareVersion(ctx context.Context) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SnowballDeviceSoftwareVersion", ctx) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // SnowballDeviceSoftwareVersion indicates an expected call of SnowballDeviceSoftwareVersion. func (mr *MockAwsClientMockRecorder) SnowballDeviceSoftwareVersion(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SnowballDeviceSoftwareVersion", reflect.TypeOf((*MockAwsClient)(nil).SnowballDeviceSoftwareVersion), ctx) } // MockLocalIMDSClient is a mock of LocalIMDSClient interface. type MockLocalIMDSClient struct { ctrl *gomock.Controller recorder *MockLocalIMDSClientMockRecorder } // MockLocalIMDSClientMockRecorder is the mock recorder for MockLocalIMDSClient. type MockLocalIMDSClientMockRecorder struct { mock *MockLocalIMDSClient } // NewMockLocalIMDSClient creates a new mock instance. func NewMockLocalIMDSClient(ctrl *gomock.Controller) *MockLocalIMDSClient { mock := &MockLocalIMDSClient{ctrl: ctrl} mock.recorder = &MockLocalIMDSClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockLocalIMDSClient) EXPECT() *MockLocalIMDSClientMockRecorder { return m.recorder } // EC2InstanceIP mocks base method. func (m *MockLocalIMDSClient) EC2InstanceIP(ctx context.Context) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EC2InstanceIP", ctx) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // EC2InstanceIP indicates an expected call of EC2InstanceIP. func (mr *MockLocalIMDSClientMockRecorder) EC2InstanceIP(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EC2InstanceIP", reflect.TypeOf((*MockLocalIMDSClient)(nil).EC2InstanceIP), ctx) }
164
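These generated mocks are driven with gomock expectations. A short sketch of how a test might stub the device-unlock check; the test name and the mocks import path are assumptions inferred from the Source comment above:

package snow_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/providers/snow/mocks"
)

func TestIsSnowballDeviceUnlocked(t *testing.T) {
	ctrl := gomock.NewController(t)
	awsClient := mocks.NewMockAwsClient(ctrl)

	// Expect exactly one call and stub its return values.
	awsClient.EXPECT().
		IsSnowballDeviceUnlocked(gomock.Any()).
		Return(true, nil)

	unlocked, err := awsClient.IsSnowballDeviceUnlocked(context.Background())
	if err != nil || !unlocked {
		t.Fatalf("expected unlocked device, got unlocked=%v err=%v", unlocked, err)
	}
}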
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/snow/snow.go // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes" gomock "github.com/golang/mock/gomock" runtime "k8s.io/apimachinery/pkg/runtime" ) // MockKubeUnAuthClient is a mock of KubeUnAuthClient interface. type MockKubeUnAuthClient struct { ctrl *gomock.Controller recorder *MockKubeUnAuthClientMockRecorder } // MockKubeUnAuthClientMockRecorder is the mock recorder for MockKubeUnAuthClient. type MockKubeUnAuthClientMockRecorder struct { mock *MockKubeUnAuthClient } // NewMockKubeUnAuthClient creates a new mock instance. func NewMockKubeUnAuthClient(ctrl *gomock.Controller) *MockKubeUnAuthClient { mock := &MockKubeUnAuthClient{ctrl: ctrl} mock.recorder = &MockKubeUnAuthClientMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockKubeUnAuthClient) EXPECT() *MockKubeUnAuthClientMockRecorder { return m.recorder } // Apply mocks base method. func (m *MockKubeUnAuthClient) Apply(ctx context.Context, kubeconfig string, obj runtime.Object) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Apply", ctx, kubeconfig, obj) ret0, _ := ret[0].(error) return ret0 } // Apply indicates an expected call of Apply. func (mr *MockKubeUnAuthClientMockRecorder) Apply(ctx, kubeconfig, obj interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockKubeUnAuthClient)(nil).Apply), ctx, kubeconfig, obj) } // KubeconfigClient mocks base method. func (m *MockKubeUnAuthClient) KubeconfigClient(kubeconfig string) kubernetes.Client { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KubeconfigClient", kubeconfig) ret0, _ := ret[0].(kubernetes.Client) return ret0 } // KubeconfigClient indicates an expected call of KubeconfigClient. func (mr *MockKubeUnAuthClientMockRecorder) KubeconfigClient(kubeconfig interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeconfigClient", reflect.TypeOf((*MockKubeUnAuthClient)(nil).KubeconfigClient), kubeconfig) }
66
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/snow/clientregistry.go // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" snow "github.com/aws/eks-anywhere/pkg/providers/snow" gomock "github.com/golang/mock/gomock" ) // MockClientRegistry is a mock of ClientRegistry interface. type MockClientRegistry struct { ctrl *gomock.Controller recorder *MockClientRegistryMockRecorder } // MockClientRegistryMockRecorder is the mock recorder for MockClientRegistry. type MockClientRegistryMockRecorder struct { mock *MockClientRegistry } // NewMockClientRegistry creates a new mock instance. func NewMockClientRegistry(ctrl *gomock.Controller) *MockClientRegistry { mock := &MockClientRegistry{ctrl: ctrl} mock.recorder = &MockClientRegistryMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockClientRegistry) EXPECT() *MockClientRegistryMockRecorder { return m.recorder } // Get mocks base method. func (m *MockClientRegistry) Get(ctx context.Context) (snow.AwsClientMap, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", ctx) ret0, _ := ret[0].(snow.AwsClientMap) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get. func (mr *MockClientRegistryMockRecorder) Get(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClientRegistry)(nil).Get), ctx) }
52
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/snow/defaults.go // Package mocks is a generated GoMock package. package mocks import ( reflect "reflect" filewriter "github.com/aws/eks-anywhere/pkg/filewriter" gomock "github.com/golang/mock/gomock" ) // MockSshKeyGenerator is a mock of SshKeyGenerator interface. type MockSshKeyGenerator struct { ctrl *gomock.Controller recorder *MockSshKeyGeneratorMockRecorder } // MockSshKeyGeneratorMockRecorder is the mock recorder for MockSshKeyGenerator. type MockSshKeyGeneratorMockRecorder struct { mock *MockSshKeyGenerator } // NewMockSshKeyGenerator creates a new mock instance. func NewMockSshKeyGenerator(ctrl *gomock.Controller) *MockSshKeyGenerator { mock := &MockSshKeyGenerator{ctrl: ctrl} mock.recorder = &MockSshKeyGeneratorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockSshKeyGenerator) EXPECT() *MockSshKeyGeneratorMockRecorder { return m.recorder } // GenerateSSHAuthKey mocks base method. func (m *MockSshKeyGenerator) GenerateSSHAuthKey(arg0 filewriter.FileWriter) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateSSHAuthKey", arg0) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // GenerateSSHAuthKey indicates an expected call of GenerateSSHAuthKey. func (mr *MockSshKeyGeneratorMockRecorder) GenerateSSHAuthKey(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSSHAuthKey", reflect.TypeOf((*MockSshKeyGenerator)(nil).GenerateSSHAuthKey), arg0) }
51
eks-anywhere
aws
Go
package reconciler

import (
	"bytes"
	"context"
	"net"

	awstypes "github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	awscredentials "github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/pkg/errors"
	"gopkg.in/ini.v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/pkg/aws"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

// AwsClientBuilder builds per-device AWS clients from the Snow credentials secret.
type AwsClientBuilder struct {
	client client.Client
}

// NewAwsClientBuilder creates a new AwsClientBuilder.
func NewAwsClientBuilder(client client.Client) *AwsClientBuilder {
	return &AwsClientBuilder{
		client: client,
	}
}

// Get rebuilds the AWS client map on every reconcile from the credentials
// secret as it exists at that point in time, so credential updates are
// picked up without restarting the manager.
func (b *AwsClientBuilder) Get(ctx context.Context) (snow.AwsClientMap, error) {
	credentials, certificates, err := getSnowCredentials(ctx, b.client)
	if err != nil {
		return nil, errors.Wrap(err, "getting snow credentials")
	}

	clients, err := createAwsClients(ctx, credentials, certificates)
	if err != nil {
		return nil, err
	}

	return snow.NewAwsClientMap(clients), nil
}

type credentialConfiguration struct {
	AccessKey string `ini:"aws_access_key_id"`
	SecretKey string `ini:"aws_secret_access_key"`
	Region    string `ini:"region"`
}

func createAwsClients(ctx context.Context, credentials []byte, certificates []byte) (aws.Clients, error) {
	var deviceIps []string
	credsCfg, err := ini.Load(credentials)
	if err != nil {
		return nil, errors.Wrap(err, "loading values from credentials")
	}
	// Each INI section named after a valid IP address is a Snow device.
	for _, ip := range credsCfg.SectionStrings() {
		if net.ParseIP(ip) != nil {
			deviceIps = append(deviceIps, ip)
		}
	}

	deviceClientMap := make(aws.Clients, len(deviceIps))
	for _, ip := range deviceIps {
		ipCfg, err := parseIpConfiguration(credsCfg, ip)
		if err != nil {
			return nil, errors.Wrapf(err, "parsing configuration for %v", ip)
		}
		clientCfg, err := awsconfig.LoadDefaultConfig(ctx,
			awsconfig.WithCustomCABundle(bytes.NewReader(certificates)),
			awsconfig.WithRegion(ipCfg.Region),
			awsconfig.WithCredentialsProvider(awscredentials.StaticCredentialsProvider{
				Value: awstypes.Credentials{
					AccessKeyID:     ipCfg.AccessKey,
					SecretAccessKey: ipCfg.SecretKey,
				},
			}),
			awsconfig.WithEndpointResolverWithOptions(aws.SnowEndpointResolver(ip)),
		)
		if err != nil {
			return nil, errors.Wrap(err, "setting up aws client")
		}
		deviceClientMap[ip] = aws.NewClientFromConfig(clientCfg)
	}

	return deviceClientMap, nil
}

func parseIpConfiguration(credsCfg *ini.File, ip string) (*credentialConfiguration, error) {
	var config credentialConfiguration
	err := credsCfg.Section(ip).StrictMapTo(&config)
	if err != nil {
		return nil, err
	}
	if len(config.AccessKey) == 0 {
		return nil, errors.New("unable to set aws_access_key_id")
	}
	if len(config.SecretKey) == 0 {
		return nil, errors.New("unable to set aws_secret_access_key")
	}
	if len(config.Region) == 0 {
		return nil, errors.New("unable to set region")
	}

	return &config, nil
}
104
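A brief sketch of how a caller might consume the builder. The surrounding helper is hypothetical, and it assumes snow.AwsClientMap can be ranged over by device IP, which matches how createAwsClients keys its result:

package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/pkg/providers/snow/reconciler"
)

// buildDeviceClients is a hypothetical helper showing the call pattern.
func buildDeviceClients(ctx context.Context, k8s client.Client) error {
	builder := reconciler.NewAwsClientBuilder(k8s)

	// Get re-reads the credentials secret, so rotated credentials are
	// picked up on the next call without restarting the manager.
	clientMap, err := builder.Get(ctx)
	if err != nil {
		return err
	}
	for ip := range clientMap {
		fmt.Printf("configured snow device client for %s\n", ip)
	}

	return nil
}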
eks-anywhere
aws
Go
package reconciler_test

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/providers/snow/reconciler"
)

const caBundle = `-----BEGIN CERTIFICATE-----
MIIDXjCCAkagAwIBAgIIb5m0RljJCMEwDQYJKoZIhvcNAQENBQAwODE2MDQGA1UE
AwwtSklELTIwNjg0MzQyMDAwMi0xOTItMTY4LTEtMjM1LTIyLTAxLTA2LTIyLTA0
MB4XDTIxMDExMTIyMDc1OFoXDTI1MTIxNjIyMDc1OFowODE2MDQGA1UEAwwtSklE
LTIwNjg0MzQyMDAwMi0xOTItMTY4LTEtMjM1LTIyLTAxLTA2LTIyLTA0MIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmOTQDBfBtPcVDFg/a59dk+rYrPRU
f5zl7JgFAEw1n82SkbNm4srwloj8pCuD1nJAlN+3LKoiby9jU8ZqoQKqppJaK1QK
dv27JYNlWorG9r6KrFkiETn2cxuAwcRBvq4UF76WdNr7zFjI108byPp9Pd0mxKiQ
6WVaxcKX9AEcarB/GfidHO95Aay6tiBU1SQvBJro3L1/UFu5STSpZai9zx+VkWTJ
D0JXh7eLF4yL0N1oU0hX2CGDxDz4VlJmBOvbnRuwsOruRMtUFRUy59cPzr//4fjd
4S7AYbeOVPwEP7q19NZ6+P7E71jTq1rz8RhAnW/JcbTKS0KqgBUPz0U4qQIDAQAB
o2wwajAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBQTaZzL2goqq7/MbJEfNRuzbwih
kTA7BgNVHREENDAyhjBJRDpKSUQtMjA2ODQzNDIwMDAyLTE5Mi0xNjgtMS0yMzUt
MjItMDEtMDYtMjItMDQwDQYJKoZIhvcNAQENBQADggEBAEzel+UsphUx49EVAyWB
PzSzoE7X62fg/b4gU7ifFHpWpYpAPsbapz9/Tywc4TGRItfctXYZsjchJKiutGU2
zX4rt1NSHkx72iMl3obQ2jQmTD8f9LyCqya+QM4CA74kk6v2ng1EiwMYvQlTvWY4
FEWv21yNRs2yiRuHWjRYH4TF54cCoDQGpFpsOFi0L4V/yo1XuimSLx2vvKZ0lCNt
KxC1oCgCxxNkOa/6iLk6qVANoX5KIVsataVhvGK+9mwWn8+dnMFneMiWd/jvi+dh
eywldVELBWRKELDdBc9Xb4i5BETF6dUlmvpWgpOXXO3uJlIRGZCVFLsgQ511oMxM
rEA=
-----END CERTIFICATE-----
`

const validCredentials = `[1.2.3.4]
aws_access_key_id = ABCDEFGHIJKLMNOPQR2T
aws_secret_access_key = AfSD7sYz/TBZtzkReBl6PuuISzJ2WtNkeePw+nNzJ
region = snow

[1.2.3.5]
aws_access_key_id = ABCDEFGHIJKLMNOPQR2T
aws_secret_access_key = AfSD7sYz/TBZtzkReBl6PuuISzJ2WtNkeePw+nNzJ
region = snow
`

const invalidCredentialsIniFormat = `1.2.3.5
aws_access_key_id = ABCDEFGHIJKLMNOPQR2T
aws_secret_access_key = AfSD7sYz/TBZtzkReBl6PuuISzJ2WtNkeePw+nNzJ
region = snow
`

const invalidCredentialsMissingAccessKey = `[1.2.3.5]
aws_secret_access_key = AfSD7sYz/TBZtzkReBl6PuuISzJ2WtNkeePw+nNzJ
region = snow
`

const invalidCredentialsMissingSecretKey = `[1.2.3.5]
aws_access_key_id = ABCDEFGHIJKLMNOPQR2T
region = snow
`

const invalidCredentialsMissingRegion = `[1.2.3.5]
aws_access_key_id = ABCDEFGHIJKLMNOPQR2T
aws_secret_access_key = AfSD7sYz/TBZtzkReBl6PuuISzJ2WtNkeePw+nNzJ
`

func TestBuildSnowAwsClientMap(t *testing.T) {
	tests := []struct {
		name    string
		secret  *corev1.Secret
		wantErr string
	}{
		{
			name:    "valid",
			secret:  testSecret(validCredentials),
			wantErr: "",
		},
		{
			name:    "invalid ini format",
			secret:  testSecret(invalidCredentialsIniFormat),
			wantErr: "loading values from credentials: key-value delimiter not found: 1.2.3.5",
		},
		{
			name:    "missing access key",
			secret:  testSecret(invalidCredentialsMissingAccessKey),
			wantErr: "parsing configuration for 1.2.3.5: unable to set aws_access_key_id",
		},
		{
			name:    "missing secret key",
			secret:  testSecret(invalidCredentialsMissingSecretKey),
			wantErr: "parsing configuration for 1.2.3.5: unable to set aws_secret_access_key",
		},
		{
			name:    "missing region",
			secret:  testSecret(invalidCredentialsMissingRegion),
			wantErr: "parsing configuration for 1.2.3.5: unable to set region",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			ctx := context.Background()
			objs := []runtime.Object{tt.secret}
			cb := fake.NewClientBuilder()
			cl := cb.WithRuntimeObjects(objs...).Build()

			clientBuilder := reconciler.NewAwsClientBuilder(cl)
			_, err := clientBuilder.Get(ctx)
			if tt.wantErr == "" {
				g.Expect(err).To(Succeed())
			} else {
				g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
			}
		})
	}
}

func TestBuildSnowAwsClientMapNonexistentSecret(t *testing.T) {
	g := NewWithT(t)
	ctx := context.Background()
	cb := fake.NewClientBuilder()
	cl := cb.Build()

	clientBuilder := reconciler.NewAwsClientBuilder(cl)
	_, err := clientBuilder.Get(ctx)
	g.Expect(err).To(MatchError(ContainSubstring("getting snow credentials: secrets \"capas-manager-bootstrap-credentials\" not found")))
}

func testSecret(creds string) *corev1.Secret {
	return &corev1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: constants.CapasSystemNamespace,
			Name:      reconciler.BoostrapSecretName,
		},
		Data: map[string][]byte{
			"credentials": []byte(creds),
			"ca-bundle":   []byte(caBundle),
		},
	}
}
154
eks-anywhere
aws
Go
package reconciler

import (
	"context"

	apiv1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/aws/eks-anywhere/pkg/constants"
)

// BoostrapSecretName is the name of the CAPAS manager bootstrap secret that
// holds the Snow devices' credentials and CA bundle.
const BoostrapSecretName = "capas-manager-bootstrap-credentials"

func getSnowCredentials(ctx context.Context, cli client.Client) (credentials, caBundle []byte, err error) {
	secret := &apiv1.Secret{}
	secretKey := client.ObjectKey{
		Namespace: constants.CapasSystemNamespace,
		Name:      BoostrapSecretName,
	}
	if err = cli.Get(ctx, secretKey, secret); err != nil {
		return nil, nil, err
	}

	return secret.Data["credentials"], secret.Data["ca-bundle"], nil
}
26
eks-anywhere
aws
Go
package reconciler_test

import (
	"os"
	"testing"

	"github.com/aws/eks-anywhere/internal/test/envtest"
)

var env *envtest.Environment

func TestMain(m *testing.M) {
	os.Exit(envtest.RunWithEnvironment(m, envtest.WithAssignment(&env)))
}
15
eks-anywhere
aws
Go
package reconciler

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	"k8s.io/klog/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/controller"
	"github.com/aws/eks-anywhere/pkg/controller/clientutil"
	"github.com/aws/eks-anywhere/pkg/controller/clusters"
	"github.com/aws/eks-anywhere/pkg/controller/serverside"
	"github.com/aws/eks-anywhere/pkg/providers/snow"
)

// CNIReconciler reconciles the CNI in a workload cluster to the state defined in the cluster spec.
type CNIReconciler interface {
	Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error)
}

// RemoteClientRegistry builds Kubernetes clients for the API servers of workload clusters.
type RemoteClientRegistry interface {
	GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error)
}

// IPValidator defines an interface for the methods to validate the control plane IP.
type IPValidator interface {
	ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error)
}

// Reconciler reconciles Snow clusters to their desired state.
type Reconciler struct {
	client               client.Client
	cniReconciler        CNIReconciler
	remoteClientRegistry RemoteClientRegistry
	ipValidator          IPValidator
	*serverside.ObjectApplier
}

// New initializes a new reconciler for the Snow provider.
func New(client client.Client, cniReconciler CNIReconciler, remoteClientRegistry RemoteClientRegistry, ipValidator IPValidator) *Reconciler {
	return &Reconciler{
		client:               client,
		cniReconciler:        cniReconciler,
		remoteClientRegistry: remoteClientRegistry,
		ipValidator:          ipValidator,
		ObjectApplier:        serverside.NewObjectApplier(client),
	}
}

// Reconcile brings the whole cluster (control plane, CNI and workers) to the
// desired state by running the registered phases in order.
func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "snow")
	clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c)
	if err != nil {
		return controller.Result{}, err
	}

	return controller.NewPhaseRunner[*cluster.Spec]().Register(
		r.ipValidator.ValidateControlPlaneIP,
		r.ValidateMachineConfigs,
		clusters.CleanupStatusAfterValidate,
		r.ReconcileControlPlane,
		r.CheckControlPlaneReady,
		r.ReconcileCNI,
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// ReconcileWorkerNodes validates the cluster definition and reconciles the worker nodes
// to the desired state.
func (r *Reconciler) ReconcileWorkerNodes(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) {
	log = log.WithValues("provider", "snow", "reconcile type", "workers")
	clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c)
	if err != nil {
		return controller.Result{}, err
	}

	return controller.NewPhaseRunner[*cluster.Spec]().Register(
		r.ValidateMachineConfigs,
		r.ReconcileWorkers,
	).Run(ctx, log, clusterSpec)
}

// ValidateMachineConfigs checks that every SnowMachineConfig referenced by the
// cluster has a valid spec, surfacing the failure message in the cluster status
// and stopping reconciliation when one does not.
func (r *Reconciler) ValidateMachineConfigs(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "validateMachineConfigs")

	for _, machineConfig := range clusterSpec.SnowMachineConfigs {
		if !machineConfig.Status.SpecValid {
			if machineConfig.Status.FailureMessage != nil {
				failureMessage := fmt.Sprintf("Invalid %s SnowMachineConfig: %s", machineConfig.Name, *machineConfig.Status.FailureMessage)
				clusterSpec.Cluster.Status.FailureMessage = &failureMessage
				log.Error(errors.New(*machineConfig.Status.FailureMessage), "Invalid SnowMachineConfig", "machineConfig", klog.KObj(machineConfig))
			} else {
				log.Info("SnowMachineConfig hasn't been validated yet", "machineConfig", klog.KObj(machineConfig))
			}

			return controller.ResultWithReturn(), nil
		}
	}

	return controller.Result{}, nil
}

// ReconcileControlPlane applies the control plane CAPI objects to the cluster.
func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileControlPlane")
	log.Info("Applying control plane CAPI objects")
	cp, err := snow.ControlPlaneSpec(ctx, log, clientutil.NewKubeClient(r.client), clusterSpec)
	if err != nil {
		return controller.Result{}, err
	}

	return clusters.ReconcileControlPlane(ctx, r.client, toClientControlPlane(cp))
}

// CheckControlPlaneReady checks whether the cluster's control plane is ready.
func (r *Reconciler) CheckControlPlaneReady(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "checkControlPlaneReady")
	return clusters.CheckControlPlaneReady(ctx, r.client, log, clusterSpec.Cluster)
}

// ReconcileCNI takes the CNI in the workload cluster to the desired state.
func (r *Reconciler) ReconcileCNI(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileCNI")

	client, err := r.remoteClientRegistry.GetClient(ctx, controller.CapiClusterObjectKey(clusterSpec.Cluster))
	if err != nil {
		return controller.Result{}, err
	}

	return r.cniReconciler.Reconcile(ctx, log, client, clusterSpec)
}

// ReconcileWorkers applies the worker CAPI objects to the cluster.
func (r *Reconciler) ReconcileWorkers(ctx context.Context, log logr.Logger, clusterSpec *cluster.Spec) (controller.Result, error) {
	log = log.WithValues("phase", "reconcileWorkers")
	log.Info("Applying worker CAPI objects")
	w, err := snow.WorkersSpec(ctx, log, clusterSpec, clientutil.NewKubeClient(r.client))
	if err != nil {
		return controller.Result{}, err
	}

	return clusters.ReconcileWorkersForEKSA(ctx, log, r.client, clusterSpec.Cluster, toClientWorkers(w))
}

func toClientControlPlane(cp *snow.ControlPlane) *clusters.ControlPlane {
	other := make([]client.Object, 0, 2+len(cp.CAPASIPPools))
	other = append(other, cp.Secret)
	for _, p := range cp.CAPASIPPools {
		other = append(other, p)
	}

	return &clusters.ControlPlane{
		Cluster:                     cp.Cluster,
		ProviderCluster:             cp.ProviderCluster,
		KubeadmControlPlane:         cp.KubeadmControlPlane,
		ControlPlaneMachineTemplate: cp.ControlPlaneMachineTemplate,
		EtcdCluster:                 cp.EtcdCluster,
		EtcdMachineTemplate:         cp.EtcdMachineTemplate,
		Other:                       other,
	}
}

func toClientWorkers(workers *snow.Workers) *clusters.Workers {
	other := make([]client.Object, 0, len(workers.CAPASIPPools))
	for _, p := range workers.CAPASIPPools {
		other = append(other, p)
	}

	w := &clusters.Workers{
		Groups: make([]clusters.WorkerGroup, 0, len(workers.Groups)),
		Other:  other,
	}

	for _, g := range workers.Groups {
		w.Groups = append(w.Groups, clusters.WorkerGroup{
			MachineDeployment:       g.MachineDeployment,
			KubeadmConfigTemplate:   g.KubeadmConfigTemplate,
			ProviderMachineTemplate: g.ProviderMachineTemplate,
		})
	}

	return w
}
184
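Every method Register-ed above follows the same phase contract, which is what lets NewPhaseRunner chain them. A minimal sketch of a custom phase under that contract; the logging phase itself is hypothetical:

package reconciler

import (
	"context"

	"github.com/go-logr/logr"

	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/controller"
)

// logClusterPhase is a hypothetical phase: return an empty Result to let the
// runner continue to the next phase, or controller.ResultWithReturn() (as
// ValidateMachineConfigs does) to stop reconciliation early without an error.
func logClusterPhase(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) {
	log.Info("Running custom phase", "cluster", spec.Cluster.Name)
	return controller.Result{}, nil
}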
eks-anywhere
aws
Go
package reconciler_test import ( "context" "testing" "github.com/golang/mock/gomock" . "github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" clusterspec "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller" "github.com/aws/eks-anywhere/pkg/controller/clientutil" snowv1 "github.com/aws/eks-anywhere/pkg/providers/snow/api/v1beta1" "github.com/aws/eks-anywhere/pkg/providers/snow/reconciler" "github.com/aws/eks-anywhere/pkg/providers/snow/reconciler/mocks" "github.com/aws/eks-anywhere/pkg/utils/ptr" ) const ( clusterNamespace = "test-namespace" ) func TestReconcilerReconcileSuccess(t *testing.T) { tt := newReconcilerTest(t) // We want to check that the cluster status is cleaned up if validations are passed tt.cluster.Status.FailureMessage = ptr.String("invalid cluster") capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() logger := test.NewNullLogger() remoteClient := fake.NewClientBuilder().Build() tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil) tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"}, ).Return(remoteClient, nil) tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec()) result, err := tt.reconciler().Reconcile(tt.ctx, logger, tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil()) } func TestReconcilerReconcileWorkerNodesSuccess(t *testing.T) { tt := newReconcilerTest(t) tt.cluster.Name = "my-management-cluster" tt.cluster.SetSelfManaged() capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() logger := test.NewNullLogger() result, err := tt.reconciler().ReconcileWorkerNodes(tt.ctx, logger, tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "my-management-cluster-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &snowv1.AWSSnowMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "my-management-cluster-md-0-1", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "my-management-cluster-md-0", Namespace: constants.EksaSystemNamespace, }, }, ) tt.ShouldEventuallyExist(tt.ctx, &snowv1.AWSSnowIPPool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ip-pool", Namespace: constants.EksaSystemNamespace, 
}, }, ) } func TestReconcilerValidateMachineConfigsInvalidWorkerMachineConfig(t *testing.T) { tt := newReconcilerTest(t) tt.machineConfigWorker.Status.SpecValid = false m := "Something wrong" tt.machineConfigWorker.Status.FailureMessage = &m tt.withFakeClient() result, err := tt.reconciler().ValidateMachineConfigs(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue") tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation") tt.Expect(tt.cluster.Status.FailureMessage).ToNot(BeZero()) tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("Invalid worker-machine-config SnowMachineConfig")) tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("Something wrong")) } func TestReconcilerValidateMachineConfigsInvalidControlPlaneMachineConfig(t *testing.T) { tt := newReconcilerTest(t) tt.machineConfigControlPlane.Status.SpecValid = false m := "Something wrong" tt.machineConfigControlPlane.Status.FailureMessage = &m tt.withFakeClient() result, err := tt.reconciler().ValidateMachineConfigs(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue") tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation") tt.Expect(tt.cluster.Status.FailureMessage).ToNot(BeZero()) tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("Invalid cp-machine-config SnowMachineConfig")) tt.Expect(*tt.cluster.Status.FailureMessage).To(ContainSubstring("Something wrong")) } func TestReconcilerValidateMachineConfigsMachineConfigNotValidated(t *testing.T) { tt := newReconcilerTest(t) tt.machineConfigWorker.Status.SpecValid = false tt.withFakeClient() result, err := tt.reconciler().ValidateMachineConfigs(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).To(BeNil(), "error should be nil to prevent requeue") tt.Expect(result).To(Equal(controller.Result{Result: &reconcile.Result{}}), "result should stop reconciliation") tt.Expect(tt.cluster.Status.FailureMessage).To(BeNil()) } func TestReconcilerReconcileWorkers(t *testing.T) { tt := newReconcilerTest(t) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.createAllObjs() result, err := tt.reconciler().ReconcileWorkers(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcilerReconcileControlPlane(t *testing.T) { tt := newReconcilerTest(t) tt.createAllObjs() result, err := tt.reconciler().ReconcileControlPlane(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) tt.ShouldEventuallyExist(tt.ctx, &controlplanev1.KubeadmControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "workload-cluster", Namespace: "eksa-system", }, }, ) tt.ShouldEventuallyExist(tt.ctx, &snowv1.AWSSnowMachineTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "workload-cluster-control-plane-1", Namespace: "eksa-system", }, }, ) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = "workload-cluster" }) tt.ShouldEventuallyExist(tt.ctx, capiCluster) tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: 
"workload-cluster-snow-credentials", Namespace: "eksa-system"}}) } func TestReconcilerCheckControlPlaneReadyItIsReady(t *testing.T) { tt := newReconcilerTest(t) capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) { c.Name = tt.cluster.Name }) tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster) tt.withFakeClient() result, err := tt.reconciler().CheckControlPlaneReady(tt.ctx, test.NewNullLogger(), tt.buildSpec()) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcilerReconcileCNISuccess(t *testing.T) { tt := newReconcilerTest(t) tt.withFakeClient() logger := test.NewNullLogger() remoteClient := fake.NewClientBuilder().Build() spec := tt.buildSpec() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"}, ).Return(remoteClient, nil) tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec) result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec) tt.Expect(err).NotTo(HaveOccurred()) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } func TestReconcilerReconcileCNIErrorClientRegistry(t *testing.T) { tt := newReconcilerTest(t) tt.withFakeClient() logger := test.NewNullLogger() spec := tt.buildSpec() tt.remoteClientRegistry.EXPECT().GetClient( tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"}, ).Return(nil, errors.New("building client")) result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec) tt.Expect(err).To(MatchError(ContainSubstring("building client"))) tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero()) tt.Expect(result).To(Equal(controller.Result{})) } type reconcilerTest struct { t testing.TB *WithT *envtest.APIExpecter ctx context.Context cniReconciler *mocks.MockCNIReconciler remoteClientRegistry *mocks.MockRemoteClientRegistry ipValidator *mocks.MockIPValidator cluster *anywherev1.Cluster client client.Client env *envtest.Environment eksaSupportObjs []client.Object machineConfigControlPlane *anywherev1.SnowMachineConfig machineConfigWorker *anywherev1.SnowMachineConfig } func newReconcilerTest(t testing.TB) *reconcilerTest { ctrl := gomock.NewController(t) cniReconciler := mocks.NewMockCNIReconciler(ctrl) remoteClientRegistry := mocks.NewMockRemoteClientRegistry(ctrl) ipValidator := mocks.NewMockIPValidator(ctrl) c := env.Client() bundle := test.Bundle() managementCluster := snowCluster(func(c *anywherev1.Cluster) { c.Name = "management-cluster" c.Spec.ManagementCluster = anywherev1.ManagementCluster{ Name: c.Name, } c.Spec.BundlesRef = &anywherev1.BundlesRef{ Name: bundle.Name, Namespace: bundle.Namespace, APIVersion: bundle.APIVersion, } }) machineConfigCP := snowMachineConfig(func(m *anywherev1.SnowMachineConfig) { m.Name = "cp-machine-config" m.Status.SpecValid = true }) ipPool := ipPool() machineConfigWN := snowMachineConfig(func(m *anywherev1.SnowMachineConfig) { m.Name = "worker-machine-config" m.Spec.Network.DirectNetworkInterfaces[0].DHCP = false m.Spec.Network.DirectNetworkInterfaces[0].IPPoolRef = &anywherev1.Ref{ Name: ipPool.Name, Kind: ipPool.Kind, } m.Status.SpecValid = true }) credentialsSecret := credentialsSecret() workloadClusterDatacenter := snowDataCenter(func(d *anywherev1.SnowDatacenterConfig) { d.Spec.IdentityRef = anywherev1.Ref{ Kind: "Secret", Name: credentialsSecret.Name, } }) cluster := snowCluster(func(c *anywherev1.Cluster) { c.Name = 
"workload-cluster" c.Spec.ManagementCluster = anywherev1.ManagementCluster{ Name: managementCluster.Name, } c.Spec.BundlesRef = &anywherev1.BundlesRef{ Name: bundle.Name, Namespace: bundle.Namespace, APIVersion: bundle.APIVersion, } c.Spec.ControlPlaneConfiguration = anywherev1.ControlPlaneConfiguration{ Count: 1, Endpoint: &anywherev1.Endpoint{ Host: "1.1.1.1", }, MachineGroupRef: &anywherev1.Ref{ Kind: "SnowMachineConfig", Name: machineConfigCP.Name, }, } c.Spec.DatacenterRef = anywherev1.Ref{ Kind: anywherev1.SnowDatacenterKind, Name: workloadClusterDatacenter.Name, } c.Spec.WorkerNodeGroupConfigurations = append(c.Spec.WorkerNodeGroupConfigurations, anywherev1.WorkerNodeGroupConfiguration{ Count: ptr.Int(1), MachineGroupRef: &anywherev1.Ref{ Kind: "SnowMachineConfig", Name: machineConfigWN.Name, }, Name: "md-0", Labels: nil, }, ) }) tt := &reconcilerTest{ t: t, WithT: NewWithT(t), APIExpecter: envtest.NewAPIExpecter(t, c), ctx: context.Background(), cniReconciler: cniReconciler, remoteClientRegistry: remoteClientRegistry, ipValidator: ipValidator, client: c, env: env, eksaSupportObjs: []client.Object{ test.Namespace(clusterNamespace), test.Namespace(constants.EksaSystemNamespace), managementCluster, workloadClusterDatacenter, bundle, test.EksdRelease(), credentialsSecret, ipPool, }, cluster: cluster, machineConfigControlPlane: machineConfigCP, machineConfigWorker: machineConfigWN, } t.Cleanup(tt.cleanup) return tt } func (tt *reconcilerTest) cleanup() { tt.DeleteAndWait(tt.ctx, tt.allObjs()...) tt.DeleteAllOfAndWait(tt.ctx, &bootstrapv1.KubeadmConfigTemplate{}) tt.DeleteAllOfAndWait(tt.ctx, &snowv1.AWSSnowMachineTemplate{}) tt.DeleteAllOfAndWait(tt.ctx, &clusterv1.MachineDeployment{}) } func (tt *reconcilerTest) buildSpec() *clusterspec.Spec { tt.t.Helper() spec, err := clusterspec.BuildSpec(tt.ctx, clientutil.NewKubeClient(tt.client), tt.cluster) tt.Expect(err).NotTo(HaveOccurred()) return spec } func (tt *reconcilerTest) withFakeClient() { tt.client = fake.NewClientBuilder().WithObjects(clientutil.ObjectsToClientObjects(tt.allObjs())...).Build() } func (tt *reconcilerTest) reconciler() *reconciler.Reconciler { return reconciler.New(tt.client, tt.cniReconciler, tt.remoteClientRegistry, tt.ipValidator) } func (tt *reconcilerTest) createAllObjs() { tt.t.Helper() envtest.CreateObjs(tt.ctx, tt.t, tt.client, tt.allObjs()...) } func (tt *reconcilerTest) allObjs() []client.Object { objs := make([]client.Object, 0, len(tt.eksaSupportObjs)+3) objs = append(objs, tt.eksaSupportObjs...) 
objs = append(objs, tt.cluster, tt.machineConfigControlPlane, tt.machineConfigWorker) return objs } type clusterOpt func(*anywherev1.Cluster) func snowCluster(opts ...clusterOpt) *anywherev1.Cluster { c := &anywherev1.Cluster{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.ClusterKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, }, Spec: anywherev1.ClusterSpec{ KubernetesVersion: "1.22", ClusterNetwork: anywherev1.ClusterNetwork{ Pods: anywherev1.Pods{ CidrBlocks: []string{"0.0.0.0"}, }, Services: anywherev1.Services{ CidrBlocks: []string{"0.0.0.0"}, }, }, }, } for _, opt := range opts { opt(c) } return c } type datacenterOpt func(*anywherev1.SnowDatacenterConfig) func snowDataCenter(opts ...datacenterOpt) *anywherev1.SnowDatacenterConfig { d := &anywherev1.SnowDatacenterConfig{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.SnowDatacenterKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "datacenter", Namespace: clusterNamespace, }, } for _, opt := range opts { opt(d) } return d } type snowMachineOpt func(*anywherev1.SnowMachineConfig) func snowMachineConfig(opts ...snowMachineOpt) *anywherev1.SnowMachineConfig { m := &anywherev1.SnowMachineConfig{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.SnowMachineConfigKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: clusterNamespace, }, Spec: anywherev1.SnowMachineConfigSpec{ PhysicalNetworkConnector: anywherev1.SFPPlus, OSFamily: anywherev1.Ubuntu, Network: anywherev1.SnowNetwork{ DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{ { Index: 1, Primary: true, DHCP: true, }, }, }, }, } for _, opt := range opts { opt(m) } return m } func credentialsSecret() *corev1.Secret { return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, ObjectMeta: metav1.ObjectMeta{ Name: "test-snow-credentials", Namespace: clusterNamespace, }, Data: map[string][]byte{ "credentials": []byte("creds"), "ca-bundle": []byte("certs"), }, Type: "Opaque", } } func ipPool() *anywherev1.SnowIPPool { return &anywherev1.SnowIPPool{ TypeMeta: metav1.TypeMeta{ Kind: anywherev1.SnowIPPoolKind, APIVersion: anywherev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "test-ip-pool", Namespace: clusterNamespace, }, Spec: anywherev1.SnowIPPoolSpec{ Pools: []anywherev1.IPPool{ { IPStart: "start", IPEnd: "end", Gateway: "gateway", Subnet: "subnet", }, }, }, } }
556
eks-anywhere
aws
Go
// Code generated by MockGen. DO NOT EDIT. // Source: pkg/providers/snow/reconciler/reconciler.go // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" cluster "github.com/aws/eks-anywhere/pkg/cluster" controller "github.com/aws/eks-anywhere/pkg/controller" logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" client "sigs.k8s.io/controller-runtime/pkg/client" ) // MockCNIReconciler is a mock of CNIReconciler interface. type MockCNIReconciler struct { ctrl *gomock.Controller recorder *MockCNIReconcilerMockRecorder } // MockCNIReconcilerMockRecorder is the mock recorder for MockCNIReconciler. type MockCNIReconcilerMockRecorder struct { mock *MockCNIReconciler } // NewMockCNIReconciler creates a new mock instance. func NewMockCNIReconciler(ctrl *gomock.Controller) *MockCNIReconciler { mock := &MockCNIReconciler{ctrl: ctrl} mock.recorder = &MockCNIReconcilerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockCNIReconciler) EXPECT() *MockCNIReconcilerMockRecorder { return m.recorder } // Reconcile mocks base method. func (m *MockCNIReconciler) Reconcile(ctx context.Context, logger logr.Logger, client client.Client, spec *cluster.Spec) (controller.Result, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Reconcile", ctx, logger, client, spec) ret0, _ := ret[0].(controller.Result) ret1, _ := ret[1].(error) return ret0, ret1 } // Reconcile indicates an expected call of Reconcile. func (mr *MockCNIReconcilerMockRecorder) Reconcile(ctx, logger, client, spec interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCNIReconciler)(nil).Reconcile), ctx, logger, client, spec) } // MockRemoteClientRegistry is a mock of RemoteClientRegistry interface. type MockRemoteClientRegistry struct { ctrl *gomock.Controller recorder *MockRemoteClientRegistryMockRecorder } // MockRemoteClientRegistryMockRecorder is the mock recorder for MockRemoteClientRegistry. type MockRemoteClientRegistryMockRecorder struct { mock *MockRemoteClientRegistry } // NewMockRemoteClientRegistry creates a new mock instance. func NewMockRemoteClientRegistry(ctrl *gomock.Controller) *MockRemoteClientRegistry { mock := &MockRemoteClientRegistry{ctrl: ctrl} mock.recorder = &MockRemoteClientRegistryMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRemoteClientRegistry) EXPECT() *MockRemoteClientRegistryMockRecorder { return m.recorder } // GetClient mocks base method. func (m *MockRemoteClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetClient", ctx, cluster) ret0, _ := ret[0].(client.Client) ret1, _ := ret[1].(error) return ret0, ret1 } // GetClient indicates an expected call of GetClient. func (mr *MockRemoteClientRegistryMockRecorder) GetClient(ctx, cluster interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockRemoteClientRegistry)(nil).GetClient), ctx, cluster) } // MockIPValidator is a mock of IPValidator interface. type MockIPValidator struct { ctrl *gomock.Controller recorder *MockIPValidatorMockRecorder } // MockIPValidatorMockRecorder is the mock recorder for MockIPValidator. 
type MockIPValidatorMockRecorder struct { mock *MockIPValidator } // NewMockIPValidator creates a new mock instance. func NewMockIPValidator(ctrl *gomock.Controller) *MockIPValidator { mock := &MockIPValidator{ctrl: ctrl} mock.recorder = &MockIPValidatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockIPValidator) EXPECT() *MockIPValidatorMockRecorder { return m.recorder } // ValidateControlPlaneIP mocks base method. func (m *MockIPValidator) ValidateControlPlaneIP(ctx context.Context, log logr.Logger, spec *cluster.Spec) (controller.Result, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateControlPlaneIP", ctx, log, spec) ret0, _ := ret[0].(controller.Result) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateControlPlaneIP indicates an expected call of ValidateControlPlaneIP. func (mr *MockIPValidatorMockRecorder) ValidateControlPlaneIP(ctx, log, spec interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneIP", reflect.TypeOf((*MockIPValidator)(nil).ValidateControlPlaneIP), ctx, log, spec) }
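// Illustrative usage sketch (an added note, not generated code): tests
// typically wire these mocks through gomock controllers and the EXPECT()
// recorders, roughly like the following (identifiers such as t, ctx, logger,
// cl, and spec are assumed to exist in the calling test):
//
//	ctrl := gomock.NewController(t)
//	cni := NewMockCNIReconciler(ctrl)
//	cni.EXPECT().
//		Reconcile(ctx, logger, cl, spec).
//		Return(controller.Result{}, nil)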
131
eks-anywhere
aws
Go
package tinkerbell

import (
	"errors"
	"fmt"
	"net/http"

	tinkerbellv1 "github.com/tinkerbell/cluster-api-provider-tinkerbell/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/clusterapi"
	"github.com/aws/eks-anywhere/pkg/networkutils"
	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
)

// TODO(chrisdoherty) Add worker node group assertions

// AssertMachineConfigsValid iterates over all machine configs in spec and validates each one.
func AssertMachineConfigsValid(spec *ClusterSpec) error {
	for _, config := range spec.MachineConfigs {
		if err := config.Validate(); err != nil {
			return err
		}
	}
	return nil
}

// AssertDatacenterConfigValid asserts the DatacenterConfig in spec is valid.
func AssertDatacenterConfigValid(spec *ClusterSpec) error {
	return spec.DatacenterConfig.Validate()
}

// AssertMachineConfigNamespaceMatchesDatacenterConfig ensures all machine configuration instances
// are configured with the same namespace as the provider-specific data center configuration
// namespace.
func AssertMachineConfigNamespaceMatchesDatacenterConfig(spec *ClusterSpec) error {
	return validateMachineConfigNamespacesMatchDatacenterConfig(spec.DatacenterConfig, spec.MachineConfigs)
}

// AssertControlPlaneMachineRefExists ensures the control plane machine ref is referencing a
// known machine config.
func AssertControlPlaneMachineRefExists(spec *ClusterSpec) error {
	controlPlaneMachineRef := spec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef
	if err := validateMachineRefExists(controlPlaneMachineRef, spec.MachineConfigs); err != nil {
		return fmt.Errorf("control plane configuration machine ref: %v", err)
	}
	return nil
}

// AssertEtcdMachineRefExists ensures that, if the etcd configuration is specified, it references
// a known machine config.
func AssertEtcdMachineRefExists(spec *ClusterSpec) error {
	// Unstacked etcd is optional.
	if spec.Cluster.Spec.ExternalEtcdConfiguration == nil {
		return nil
	}

	etcdMachineRef := spec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef
	if err := validateMachineRefExists(etcdMachineRef, spec.MachineConfigs); err != nil {
		return fmt.Errorf("external etcd configuration machine group ref: %v", err)
	}

	return nil
}

// AssertWorkerNodeGroupMachineRefsExists ensures all worker node group machine refs are
// referencing a known machine config.
func AssertWorkerNodeGroupMachineRefsExists(spec *ClusterSpec) error {
	for _, group := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
		groupRef := group.MachineGroupRef
		if err := validateMachineRefExists(groupRef, spec.MachineConfigs); err != nil {
			return fmt.Errorf("worker node group configuration machine group ref: %v", err)
		}
	}

	return nil
}

// AssertK8SVersionNot120 ensures the Kubernetes version is not set to v1.20.
func AssertK8SVersionNot120(spec *ClusterSpec) error {
	if spec.Cluster.Spec.KubernetesVersion == v1alpha1.Kube120 {
		return errors.New("kubernetes version v1.20 is not supported for Bare Metal")
	}

	return nil
}

// AssertOsFamilyValid ensures the OS family specified on the machine configs is valid.
func AssertOsFamilyValid(spec *ClusterSpec) error {
	return validateOsFamily(spec)
}

// NewIPNotInUseAssertion ensures the endpoint host for the control plane isn't in use.
// The check may be unreliable due to its implementation.
func NewIPNotInUseAssertion(client networkutils.NetClient) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		ip := spec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host
		if err := validateIPUnused(client, ip); err != nil {
			return fmt.Errorf("control plane endpoint ip in use: %v", ip)
		}
		return nil
	}
}

// AssertTinkerbellIPNotInUse ensures the tinkerbell IP isn't in use.
func AssertTinkerbellIPNotInUse(client networkutils.NetClient) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		ip := spec.DatacenterConfig.Spec.TinkerbellIP
		if err := validateIPUnused(client, ip); err != nil {
			return fmt.Errorf("tinkerbellIP <%s> is already in use, please provide a unique IP", ip)
		}
		return nil
	}
}

// AssertTinkerbellIPAndControlPlaneIPNotSame ensures the tinkerbell IP and the control plane IP are not the same.
func AssertTinkerbellIPAndControlPlaneIPNotSame(spec *ClusterSpec) error {
	tinkerbellIP := spec.DatacenterConfig.Spec.TinkerbellIP
	controlPlaneIP := spec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host
	if tinkerbellIP == controlPlaneIP {
		return fmt.Errorf("controlPlaneConfiguration.endpoint.host and tinkerbellIP are the same (%s), please provide two unique IPs", tinkerbellIP)
	}
	return nil
}

// AssertHookRetrievableWithoutProxy ensures the executing machine can retrieve Hook
// from the host URL without a proxy configured. It does not guarantee the target node
// will be able to download Hook.
func AssertHookRetrievableWithoutProxy(spec *ClusterSpec) error {
	if spec.Cluster.Spec.ProxyConfiguration == nil {
		return nil
	}

	// Return an error if the hookImagesURLPath field is not specified when a proxy configuration is used.
	if spec.DatacenterConfig.Spec.HookImagesURLPath == "" {
		return fmt.Errorf("locally hosted hookImagesURLPath is required to support ProxyConfiguration")
	}

	// Verify hookImagesURLPath is accessible locally too, bypassing any ambient proxy settings.
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.Proxy = nil
	client := &http.Client{
		Transport: transport,
	}
	resp, err := client.Get(spec.DatacenterConfig.Spec.HookImagesURLPath)
	if err != nil {
		return fmt.Errorf("HookImagesURLPath: %s needs to be hosted locally while specifying proxy configuration: %v", spec.DatacenterConfig.Spec.HookImagesURLPath, err)
	}
	defer resp.Body.Close()

	return nil
}

// AssertPortsNotInUse ensures that ports 80, 42113, and 50061 are available.
func AssertPortsNotInUse(client networkutils.NetClient) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		host := "0.0.0.0"
		if err := validatePortsAvailable(client, host); err != nil {
			return err
		}
		return nil
	}
}

// HardwareSatisfiesOnlyOneSelectorAssertion ensures each hardware in the catalogue satisfies only
// one of the MachineConfigs' HardwareSelectors from the spec.
func HardwareSatisfiesOnlyOneSelectorAssertion(catalogue *hardware.Catalogue) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		selectors, err := selectorsFromClusterSpec(spec)
		if err != nil {
			return err
		}

		return validateHardwareSatisfiesOnlyOneSelector(catalogue.AllHardware(), selectors)
	}
}

// selectorsFromClusterSpec extracts all selectors specified on the MachineConfigs from spec.
func selectorsFromClusterSpec(spec *ClusterSpec) (selectorSet, error) {
	selectors := selectorSet{}

	if err := selectors.Add(spec.ControlPlaneMachineConfig().Spec.HardwareSelector); err != nil {
		return nil, err
	}

	for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() {
		err := selectors.Add(spec.WorkerNodeGroupMachineConfig(nodeGroup).Spec.HardwareSelector)
		if err != nil {
			return nil, err
		}
	}

	if spec.HasExternalEtcd() {
		if err := selectors.Add(spec.ExternalEtcdMachineConfig().Spec.HardwareSelector); err != nil {
			return nil, err
		}
	}

	return selectors, nil
}

// MinimumHardwareAvailableAssertionForCreate asserts that catalogue has sufficient hardware to
// support the ClusterSpec during a create workflow.
//
// It does not protect against intersections or subsets, so consumers should ensure a 1-to-1
// mapping between catalogue hardware and selectors.
func MinimumHardwareAvailableAssertionForCreate(catalogue *hardware.Catalogue) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		// Without hardware selectors we get undesirable behavior, so ensure we have them for
		// all MachineConfigs.
		if err := ensureHardwareSelectorsSpecified(spec); err != nil {
			return err
		}

		// Build a set of required hardware counts per machine group. minimumHardwareRequirements
		// will account for the same selector being specified on different groups.
		requirements := minimumHardwareRequirements{}

		err := requirements.Add(
			spec.ControlPlaneMachineConfig().Spec.HardwareSelector,
			spec.ControlPlaneConfiguration().Count,
		)
		if err != nil {
			return err
		}

		for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() {
			err := requirements.Add(
				spec.WorkerNodeGroupMachineConfig(nodeGroup).Spec.HardwareSelector,
				*nodeGroup.Count,
			)
			if err != nil {
				return err
			}
		}

		if spec.HasExternalEtcd() {
			err := requirements.Add(
				spec.ExternalEtcdMachineConfig().Spec.HardwareSelector,
				spec.ExternalEtcdConfiguration().Count,
			)
			if err != nil {
				return err
			}
		}

		return validateMinimumHardwareRequirements(requirements, catalogue)
	}
}

// WorkerNodeHardware holds the machine deployment name and replica count for a Tinkerbell worker
// node group.
type WorkerNodeHardware struct {
	MachineDeploymentName string
	Replicas              int
}

// ValidatableCluster allows assertions to pull worker node and control plane information.
type ValidatableCluster interface {
	// WorkerNodeHardwareGroups retrieves a list of WorkerNodeHardwares, one per worker node group
	// of a ValidatableCluster, each carrying the MachineDeployment name and replica count.
	WorkerNodeHardwareGroups() []WorkerNodeHardware

	// ControlPlaneReplicaCount retrieves the control plane replica count of the ValidatableCluster.
	ControlPlaneReplicaCount() int
}

// ValidatableTinkerbellClusterSpec wraps around the Tinkerbell ClusterSpec as a ValidatableCluster.
type ValidatableTinkerbellClusterSpec struct {
	*ClusterSpec
}

// ControlPlaneReplicaCount retrieves the ValidatableTinkerbellClusterSpec control plane replica count.
func (v *ValidatableTinkerbellClusterSpec) ControlPlaneReplicaCount() int {
	return v.Cluster.Spec.ControlPlaneConfiguration.Count
}

// WorkerNodeHardwareGroups retrieves a list of WorkerNodeHardwares for a ValidatableTinkerbellClusterSpec.
func (v *ValidatableTinkerbellClusterSpec) WorkerNodeHardwareGroups() []WorkerNodeHardware {
	workerNodeGroupConfigs := make([]WorkerNodeHardware, 0, len(v.Cluster.Spec.WorkerNodeGroupConfigurations))
	for _, workerNodeGroup := range v.Cluster.Spec.WorkerNodeGroupConfigurations {
		workerNodeGroupConfig := &WorkerNodeHardware{
			MachineDeploymentName: machineDeploymentName(v.Cluster.Name, workerNodeGroup.Name),
			Replicas:              *workerNodeGroup.Count,
		}
		workerNodeGroupConfigs = append(workerNodeGroupConfigs, *workerNodeGroupConfig)
	}
	return workerNodeGroupConfigs
}

// ValidatableTinkerbellCAPI wraps around the Tinkerbell control plane and worker CAPI objects as a ValidatableCluster.
type ValidatableTinkerbellCAPI struct {
	KubeadmControlPlane *controlplanev1.KubeadmControlPlane
	WorkerGroups        []*clusterapi.WorkerGroup[*tinkerbellv1.TinkerbellMachineTemplate]
}

// ControlPlaneReplicaCount retrieves the ValidatableTinkerbellCAPI control plane replica count.
func (v *ValidatableTinkerbellCAPI) ControlPlaneReplicaCount() int {
	return int(*v.KubeadmControlPlane.Spec.Replicas)
}

// WorkerNodeHardwareGroups retrieves a list of WorkerNodeHardwares for a ValidatableTinkerbellCAPI.
func (v *ValidatableTinkerbellCAPI) WorkerNodeHardwareGroups() []WorkerNodeHardware {
	workerNodeHardwareList := make([]WorkerNodeHardware, 0, len(v.WorkerGroups))
	for _, workerGroup := range v.WorkerGroups {
		workerNodeHardware := &WorkerNodeHardware{
			MachineDeploymentName: workerGroup.MachineDeployment.Name,
			Replicas:              int(*workerGroup.MachineDeployment.Spec.Replicas),
		}
		workerNodeHardwareList = append(workerNodeHardwareList, *workerNodeHardware)
	}
	return workerNodeHardwareList
}

// AssertionsForScaleUpDown asserts that the catalogue has sufficient hardware to support scaling
// up/down from the current ValidatableCluster to the desired ClusterSpec.
// nolint:gocyclo // TODO: Reduce cyclomatic complexity https://github.com/aws/eks-anywhere-internal/issues/1186
func AssertionsForScaleUpDown(catalogue *hardware.Catalogue, current ValidatableCluster, rollingUpgrade bool) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		// Without hardware selectors we get undesirable behavior, so ensure we have them for
		// all MachineConfigs.
		if err := ensureHardwareSelectorsSpecified(spec); err != nil {
			return err
		}

		if spec.HasExternalEtcd() {
			return fmt.Errorf("scale up/down not supported for external etcd")
		}

		// Build a set of required hardware counts per machine group. minimumHardwareRequirements
		// will account for the same selector being specified on different groups.
		requirements := minimumHardwareRequirements{}

		if current.ControlPlaneReplicaCount() != spec.Cluster.Spec.ControlPlaneConfiguration.Count {
			if rollingUpgrade {
				return fmt.Errorf("cannot perform scale up or down during rolling upgrades")
			}
			if current.ControlPlaneReplicaCount() < spec.Cluster.Spec.ControlPlaneConfiguration.Count {
				err := requirements.Add(
					spec.ControlPlaneMachineConfig().Spec.HardwareSelector,
					spec.Cluster.Spec.ControlPlaneConfiguration.Count-current.ControlPlaneReplicaCount(),
				)
				if err != nil {
					return fmt.Errorf("error during scale up: %v", err)
				}
			}
		}

		workerNodeHardwareMap := make(map[string]WorkerNodeHardware)
		for _, workerNodeHardware := range current.WorkerNodeHardwareGroups() {
			workerNodeHardwareMap[workerNodeHardware.MachineDeploymentName] = workerNodeHardware
		}

		for _, nodeGroupNewSpec := range spec.Cluster.Spec.WorkerNodeGroupConfigurations {
			nodeGroupMachineDeploymentNameNewSpec := machineDeploymentName(spec.Cluster.Name, nodeGroupNewSpec.Name)
			if workerNodeGroupOldSpec, ok := workerNodeHardwareMap[nodeGroupMachineDeploymentNameNewSpec]; ok {
				if *nodeGroupNewSpec.Count != workerNodeGroupOldSpec.Replicas {
					if rollingUpgrade {
						return fmt.Errorf("cannot perform scale up or down during rolling upgrades")
					}
					if *nodeGroupNewSpec.Count > workerNodeGroupOldSpec.Replicas {
						err := requirements.Add(
							spec.WorkerNodeGroupMachineConfig(nodeGroupNewSpec).Spec.HardwareSelector,
							*nodeGroupNewSpec.Count-workerNodeGroupOldSpec.Replicas,
						)
						if err != nil {
							return fmt.Errorf("error during scale up: %v", err)
						}
					}
				}
			} else { // The worker node group was newly added.
				if rollingUpgrade {
					return fmt.Errorf("cannot perform scale up or down during rolling upgrades")
				}
				err := requirements.Add(
					spec.WorkerNodeGroupMachineConfig(nodeGroupNewSpec).Spec.HardwareSelector,
					*nodeGroupNewSpec.Count,
				)
				if err != nil {
					return fmt.Errorf("error during scale up: %v", err)
				}
			}
		}

		if err := validateMinimumHardwareRequirements(requirements, catalogue); err != nil {
			return fmt.Errorf("for scale up, %v", err)
		}
		return nil
	}
}

// ExtraHardwareAvailableAssertionForRollingUpgrade asserts that the catalogue has sufficient
// hardware to support the ClusterSpec during a rolling upgrade workflow.
func ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue *hardware.Catalogue) ClusterSpecAssertion {
	return func(spec *ClusterSpec) error {
		// Without hardware selectors we get undesirable behavior, so ensure we have them for
		// all MachineConfigs.
		if err := ensureHardwareSelectorsSpecified(spec); err != nil {
			return err
		}

		// Build a set of required hardware counts per machine group. minimumHardwareRequirements
		// will account for the same selector being specified on different groups.
requirements := minimumHardwareRequirements{} maxSurge := 1 if spec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil { maxSurge = spec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge } err := requirements.Add( spec.ControlPlaneMachineConfig().Spec.HardwareSelector, maxSurge, ) if err != nil { return fmt.Errorf("for rolling upgrade, %v", err) } for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() { maxSurge = 1 if nodeGroup.UpgradeRolloutStrategy != nil { maxSurge = nodeGroup.UpgradeRolloutStrategy.RollingUpdate.MaxSurge } err := requirements.Add( spec.WorkerNodeGroupMachineConfig(nodeGroup).Spec.HardwareSelector, maxSurge, ) if err != nil { return fmt.Errorf("for rolling upgrade, %v", err) } } if spec.HasExternalEtcd() { return fmt.Errorf("external etcd upgrade is not supported") } if err := validateMinimumHardwareRequirements(requirements, catalogue); err != nil { return fmt.Errorf("for rolling upgrade, %v", err) } return nil } } // ensureHardwareSelectorsSpecified ensures each machine config present in spec has a hardware // selector. func ensureHardwareSelectorsSpecified(spec *ClusterSpec) error { if len(spec.ControlPlaneMachineConfig().Spec.HardwareSelector) == 0 { return missingHardwareSelectorErr{ Name: spec.ControlPlaneMachineConfig().Name, } } for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() { if len(spec.WorkerNodeGroupMachineConfig(nodeGroup).Spec.HardwareSelector) == 0 { return missingHardwareSelectorErr{ Name: spec.WorkerNodeGroupMachineConfig(nodeGroup).Name, } } } if spec.HasExternalEtcd() { if len(spec.ExternalEtcdMachineConfig().Spec.HardwareSelector) == 0 { return missingHardwareSelectorErr{ Name: spec.ExternalEtcdMachineConfig().Name, } } } return nil } type missingHardwareSelectorErr struct { Name string } func (e missingHardwareSelectorErr) Error() string { return fmt.Sprintf("missing hardware selector for %v", e.Name) }
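// runAssertions is an illustrative sketch (an addition, not part of the original
// file): every assertion in this package is a plain func over *ClusterSpec, or a
// factory returning one, so callers can collect them in a slice and run them in
// order, stopping at the first failure.
func runAssertions(spec *ClusterSpec, assertions ...ClusterSpecAssertion) error {
	for _, assertion := range assertions {
		if err := assertion(spec); err != nil {
			return err
		}
	}
	return nil
}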
478