repo_name (string, 1-52 chars) | repo_creator (6 values) | programming_language (4 values) | code (string, 0-9.68M chars) | num_lines (int64, 1-234k)
---|---|---|---|---|
eks-anywhere | aws | Go | package cluster
import (
"context"
"path"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func fluxEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.FluxConfigKind: func() APIObject {
return &anywherev1.FluxConfig{}
},
},
Processors: []ParsedProcessor{processFlux},
Defaulters: []Defaulter{
setFluxDefaults,
SetDefaultFluxConfigPath,
},
Validations: []Validation{
validateFlux,
validateFluxNamespace,
},
}
}
func processFlux(c *Config, objects ObjectLookup) {
if c.Cluster.Spec.GitOpsRef == nil {
return
}
if c.Cluster.Spec.GitOpsRef.Kind == anywherev1.FluxConfigKind {
flux := objects.GetFromRef(c.Cluster.APIVersion, *c.Cluster.Spec.GitOpsRef)
if flux == nil {
return
}
c.FluxConfig = flux.(*anywherev1.FluxConfig)
}
}
func validateFlux(c *Config) error {
if c.FluxConfig != nil {
return c.FluxConfig.Validate()
}
return nil
}
func validateFluxNamespace(c *Config) error {
if c.FluxConfig != nil {
if err := validateSameNamespace(c, c.FluxConfig); err != nil {
return err
}
}
return nil
}
func setFluxDefaults(c *Config) error {
if c.FluxConfig != nil {
c.FluxConfig.SetDefaults()
}
return nil
}
func SetDefaultFluxConfigPath(c *Config) error {
if c.FluxConfig == nil {
return nil
}
fluxConfig := c.FluxConfig
if fluxConfig.Spec.ClusterConfigPath != "" {
return nil
}
if c.Cluster.IsSelfManaged() {
fluxConfig.Spec.ClusterConfigPath = path.Join("clusters", c.Cluster.Name)
} else {
fluxConfig.Spec.ClusterConfigPath = path.Join("clusters", c.Cluster.ManagedBy())
}
return nil
}
func getFluxConfig(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.GitOpsRef == nil || c.Cluster.Spec.GitOpsRef.Kind != anywherev1.FluxConfigKind {
return nil
}
fluxConfig := &anywherev1.FluxConfig{}
if err := client.Get(ctx, c.Cluster.Spec.GitOpsRef.Name, c.Cluster.Namespace, fluxConfig); err != nil {
return err
}
c.FluxConfig = fluxConfig
return nil
}
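// Illustrative sketch (not part of the original file): SetDefaultFluxConfigPath is a
// no-op when the user already set Spec.ClusterConfigPath; only an empty path gets the
// "clusters/<cluster name>" (self-managed) or "clusters/<management cluster name>"
// (managed) default. The path value below is hypothetical.
func exampleExplicitFluxPathIsKept() string {
	cfg := &Config{
		Cluster:    &anywherev1.Cluster{},
		FluxConfig: &anywherev1.FluxConfig{},
	}
	cfg.FluxConfig.Spec.ClusterConfigPath = "custom/path"
	_ = SetDefaultFluxConfigPath(cfg) // returns early, path unchanged
	return cfg.FluxConfig.Spec.ClusterConfigPath // "custom/path"
}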
| 99 |
eks-anywhere | aws | Go | package cluster_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
)
func TestDefaultConfigClientBuilderFluxConfig(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
GitOpsRef: &anywherev1.Ref{
Kind: anywherev1.FluxConfigKind,
Name: "my-flux",
},
},
}
fluxConfig := &anywherev1.FluxConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-flux",
Namespace: "default",
},
}
client.EXPECT().Get(ctx, "my-flux", "default", &anywherev1.FluxConfig{}).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
c := obj.(*anywherev1.FluxConfig)
c.ObjectMeta = fluxConfig.ObjectMeta
return nil
},
)
config, err := b.Build(ctx, client, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(config).NotTo(BeNil())
g.Expect(config.Cluster).To(Equal(cluster))
g.Expect(config.FluxConfig).To(Equal(fluxConfig))
}
| 56 |
eks-anywhere | aws | Go | package cluster
import (
"context"
"path"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func gitOpsEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.GitOpsConfigKind: func() APIObject {
return &anywherev1.GitOpsConfig{}
},
},
Processors: []ParsedProcessor{processGitOps},
Defaulters: []Defaulter{
setGitOpsDefaults,
SetDefaultFluxGitHubConfigPath,
},
Validations: []Validation{
validateGitOps,
validateGitOpsNamespace,
},
}
}
func processGitOps(c *Config, objects ObjectLookup) {
if c.Cluster.Spec.GitOpsRef == nil {
return
}
if c.Cluster.Spec.GitOpsRef.Kind == anywherev1.GitOpsConfigKind {
gitOps := objects.GetFromRef(c.Cluster.APIVersion, *c.Cluster.Spec.GitOpsRef)
if gitOps == nil {
return
}
// GitOpsConfig will be deprecated.
// During the deprecation window, FluxConfig is used internally.
// GitOpsConfig is preserved as it was in the original spec.
gitOpsConf := gitOps.(*anywherev1.GitOpsConfig)
c.GitOpsConfig = gitOpsConf
c.FluxConfig = gitOpsConf.ConvertToFluxConfig()
}
}
func validateGitOps(c *Config) error {
if c.GitOpsConfig != nil {
return c.GitOpsConfig.Validate()
}
return nil
}
func validateGitOpsNamespace(c *Config) error {
if c.GitOpsConfig != nil {
if err := validateSameNamespace(c, c.GitOpsConfig); err != nil {
return err
}
}
return nil
}
func setGitOpsDefaults(c *Config) error {
if c.GitOpsConfig != nil {
c.GitOpsConfig.SetDefaults()
}
return nil
}
func SetDefaultFluxGitHubConfigPath(c *Config) error {
if c.GitOpsConfig == nil {
return nil
}
gitops := c.GitOpsConfig
if gitops.Spec.Flux.Github.ClusterConfigPath != "" {
return nil
}
if c.Cluster.IsSelfManaged() {
gitops.Spec.Flux.Github.ClusterConfigPath = path.Join("clusters", c.Cluster.Name)
} else {
gitops.Spec.Flux.Github.ClusterConfigPath = path.Join("clusters", c.Cluster.ManagedBy())
}
return nil
}
func getGitOps(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.GitOpsRef == nil || c.Cluster.Spec.GitOpsRef.Kind != anywherev1.GitOpsConfigKind {
return nil
}
gitOps := &anywherev1.GitOpsConfig{}
if err := client.Get(ctx, c.Cluster.Spec.GitOpsRef.Name, c.Cluster.Namespace, gitOps); err != nil {
return err
}
c.GitOpsConfig = gitOps
return nil
}
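// Illustrative sketch (not part of the original file): during the GitOpsConfig
// deprecation window a parsed Config carries both representations, with the FluxConfig
// derived via ConvertToFluxConfig as processGitOps does above. The gitOps argument is
// assumed to be a parsed, non-nil GitOpsConfig.
func exampleGitOpsDualRepresentation(gitOps *anywherev1.GitOpsConfig) *Config {
	c := &Config{}
	c.GitOpsConfig = gitOps                     // original spec is preserved
	c.FluxConfig = gitOps.ConvertToFluxConfig() // used internally going forward
	return c
}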
| 104 |
eks-anywhere | aws | Go | package cluster_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
)
const (
owner = "janedoe"
repository = "flux-fleet"
fluxNamespace = "test-ns"
branch = "test-branch"
clusterConfigPath = "test-path"
personal = false
)
func TestGitOpsToFluxConversionProcessing(t *testing.T) {
tests := []struct {
name string
wantConfigPath string
wantFluxSpec anywherev1.FluxConfigSpec
}{
{
name: "workload cluster with GitOpsConfig",
wantConfigPath: "testdata/cluster_gitops_1_21.yaml",
wantFluxSpec: anywherev1.FluxConfigSpec{
SystemNamespace: fluxNamespace,
ClusterConfigPath: clusterConfigPath,
Branch: branch,
Github: &anywherev1.GithubProviderConfig{
Owner: owner,
Repository: repository,
Personal: personal,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
config, err := cluster.ParseConfigFromFile(tt.wantConfigPath)
if err != nil {
t.Fatal("cluster.ParseConfigFromFile error != nil, want nil", err)
}
g.Expect(config.FluxConfig.Spec).To(Equal(tt.wantFluxSpec))
})
}
}
func TestDefaultConfigClientBuilderGitOpsConfig(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
GitOpsRef: &anywherev1.Ref{
Kind: anywherev1.GitOpsConfigKind,
Name: "my-gitops",
},
},
}
gitopsConfig := &anywherev1.GitOpsConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-gitops",
Namespace: "default",
},
}
client.EXPECT().Get(ctx, "my-gitops", "default", &anywherev1.GitOpsConfig{}).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
c := obj.(*anywherev1.GitOpsConfig)
c.ObjectMeta = gitopsConfig.ObjectMeta
return nil
},
)
config, err := b.Build(ctx, client, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(config).NotTo(BeNil())
g.Expect(config.Cluster).To(Equal(cluster))
g.Expect(config.GitOpsConfig).To(Equal(gitopsConfig))
}
| 98 |
eks-anywhere | aws | Go | package cluster
import (
"fmt"
"os"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/types"
)
type kubeConfigCluster struct {
Name string `json:"name"`
}
type kubeConfigYAML struct {
Clusters []*kubeConfigCluster `json:"clusters"`
}
func LoadManagement(kubeconfig string) (*types.Cluster, error) {
if kubeconfig == "" {
return nil, nil
}
kubeConfigBytes, err := os.ReadFile(kubeconfig)
if err != nil {
return nil, err
}
kc := &kubeConfigYAML{}
kc.Clusters = []*kubeConfigCluster{}
err = yaml.Unmarshal(kubeConfigBytes, &kc)
if err != nil {
return nil, fmt.Errorf("parsing kubeconfig file: %v", err)
}
if len(kc.Clusters) < 1 || len(kc.Clusters[0].Name) == 0 {
return nil, fmt.Errorf("invalid kubeconfig file: %v", kubeconfig)
}
return &types.Cluster{
Name: kc.Clusters[0].Name,
KubeconfigFile: kubeconfig,
ExistingManagement: true,
}, nil
}
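// Illustrative sketch (not part of the original file): typical use of LoadManagement.
// The kubeconfig path is hypothetical; an empty path yields a nil cluster and no error.
func exampleLoadManagement() (*types.Cluster, error) {
	mgmt, err := LoadManagement("/home/user/.kube/mgmt.kubeconfig")
	if err != nil {
		return nil, fmt.Errorf("loading management cluster: %v", err)
	}
	// mgmt.Name comes from the first cluster entry in the kubeconfig and
	// ExistingManagement is set to true.
	return mgmt, nil
}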
| 45 |
eks-anywhere | aws | Go | package cluster
import "fmt"
type (
// APIObjectGenerator returns an implementor of the APIObject interface.
APIObjectGenerator func() APIObject
// ParsedProcessor fills the Config struct from the parsed API objects in ObjectLookup.
ParsedProcessor func(*Config, ObjectLookup)
// Validation performs a validation over the Config object.
Validation func(*Config) error
// Defaulter sets defaults in a Config object.
Defaulter func(*Config) error
)
// ConfigManagerEntry declares the configuration needed to parse from yaml,
// set defaults and validate a Cluster struct for one or more types.
// It is semantically equivalent to using the individual register methods and
// exists purely for convenience.
type ConfigManagerEntry struct {
APIObjectMapping map[string]APIObjectGenerator
Processors []ParsedProcessor
Validations []Validation
Defaulters []Defaulter
}
// NewConfigManagerEntry builds a ConfigManagerEntry with empty configuration.
func NewConfigManagerEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{},
}
}
// Merge combines the configuration declared in multiple ConfigManagerEntry.
func (c *ConfigManagerEntry) Merge(entries ...*ConfigManagerEntry) error {
for _, config := range entries {
for k, v := range config.APIObjectMapping {
if err := c.RegisterMapping(k, v); err != nil {
return err
}
}
c.RegisterProcessors(config.Processors...)
c.RegisterDefaulters(config.Defaulters...)
c.RegisterValidations(config.Validations...)
}
return nil
}
// RegisterMapping records the mapping between a kubernetes Kind and an API concrete type.
func (c *ConfigManagerEntry) RegisterMapping(kind string, generator APIObjectGenerator) error {
if _, ok := c.APIObjectMapping[kind]; ok {
return fmt.Errorf("mapping for api object %s already registered", kind)
}
c.APIObjectMapping[kind] = generator
return nil
}
// RegisterProcessors records setters to fill the Config struct from the parsed API objects.
func (c *ConfigManagerEntry) RegisterProcessors(processors ...ParsedProcessor) {
c.Processors = append(c.Processors, processors...)
}
// RegisterValidations records validations for a Config struct.
func (c *ConfigManagerEntry) RegisterValidations(validations ...Validation) {
c.Validations = append(c.Validations, validations...)
}
// RegisterDefaulters records defaults for a Config struct.
func (c *ConfigManagerEntry) RegisterDefaulters(defaulters ...Defaulter) {
c.Defaulters = append(c.Defaulters, defaulters...)
}
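// Illustrative sketch (not part of the original file): composing entries the way the
// provider-specific entries (fluxEntry, oidcEntry, ...) are combined. The kind name
// and the generator argument are hypothetical.
func exampleComposeEntries(gen APIObjectGenerator) (*ConfigManagerEntry, error) {
	base := NewConfigManagerEntry()
	extra := NewConfigManagerEntry()
	if err := extra.RegisterMapping("MyKind", gen); err != nil {
		return nil, err
	}
	extra.RegisterValidations(func(c *Config) error { return nil })
	// Merge copies mappings, processors, defaulters and validations into base;
	// registering the same kind twice returns an error.
	if err := base.Merge(extra); err != nil {
		return nil, err
	}
	return base, nil
}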
| 75 |
eks-anywhere | aws | Go | package cluster_test
import (
"testing"
. "github.com/onsi/gomega"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
func TestConfigManagerEntryMerge(t *testing.T) {
g := NewWithT(t)
kind1 := "kind1"
kind2 := "kind2"
kind3 := "kind3"
generator := func() cluster.APIObject { return &anywherev1.Cluster{} }
processor := func(*cluster.Config, cluster.ObjectLookup) {}
validator := func(*cluster.Config) error { return nil }
defaulter := func(*cluster.Config) error { return nil }
c := cluster.NewConfigManagerEntry()
c2 := cluster.NewConfigManagerEntry()
g.Expect(c2.RegisterMapping(kind1, generator)).To(Succeed())
g.Expect(c2.RegisterMapping(kind2, generator)).To(Succeed())
c2.RegisterProcessors(processor)
c2.RegisterDefaulters(defaulter)
c2.RegisterValidations(validator)
c3 := cluster.NewConfigManagerEntry()
g.Expect(c3.RegisterMapping(kind3, generator)).To(Succeed())
c3.RegisterProcessors(processor)
c3.RegisterDefaulters(defaulter)
c3.RegisterValidations(validator)
g.Expect(c.Merge(c2, c3)).To(Succeed())
g.Expect(len(c.APIObjectMapping)).To(Equal(3))
g.Expect(c.APIObjectMapping[kind1]).To(Not(BeNil()))
g.Expect(c.APIObjectMapping[kind2]).To(Not(BeNil()))
g.Expect(c.APIObjectMapping[kind3]).To(Not(BeNil()))
g.Expect(c.APIObjectMapping["kind4"]).To(BeNil())
g.Expect(len(c.Processors)).To(Equal(2))
g.Expect(len(c.Defaulters)).To(Equal(2))
g.Expect(len(c.Validations)).To(Equal(2))
}
func TestConfigManagerEntryRegisterMappingError(t *testing.T) {
g := NewWithT(t)
kind1 := "kind1"
generator := func() cluster.APIObject { return &anywherev1.Cluster{} }
c := cluster.NewConfigManagerEntry()
g.Expect(c.RegisterMapping(kind1, generator)).To(Succeed())
g.Expect(c.RegisterMapping(kind1, generator)).To(MatchError(ContainSubstring("mapping for api object kind1 already registered")))
}
| 58 |
eks-anywhere | aws | Go | package cluster
import eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
func BuildMapForWorkerNodeGroupsByName(workerNodeGroups []eksav1alpha1.WorkerNodeGroupConfiguration) map[string]eksav1alpha1.WorkerNodeGroupConfiguration {
workerNodeGroupConfigs := make(map[string]eksav1alpha1.WorkerNodeGroupConfiguration, len(workerNodeGroups))
for _, config := range workerNodeGroups {
workerNodeGroupConfigs[config.Name] = config
}
return workerNodeGroupConfigs
}
func NodeGroupsToDelete(currentSpec, newSpec *Spec) []eksav1alpha1.WorkerNodeGroupConfiguration {
workerConfigs := BuildMapForWorkerNodeGroupsByName(newSpec.Cluster.Spec.WorkerNodeGroupConfigurations)
nodeGroupsToDelete := make([]eksav1alpha1.WorkerNodeGroupConfiguration, 0, len(currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, prevWorkerNodeGroupConfig := range currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
// Current spec doesn't have the default name since we never set the defaults at the api server level
if prevWorkerNodeGroupConfig.Name == "" {
prevWorkerNodeGroupConfig.Name = "md-0"
}
if _, ok := workerConfigs[prevWorkerNodeGroupConfig.Name]; !ok {
nodeGroupsToDelete = append(nodeGroupsToDelete, prevWorkerNodeGroupConfig)
}
}
return nodeGroupsToDelete
}
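// Illustrative sketch (not part of the original file): indexing worker node groups by
// name before diffing specs. The group names are made up.
func exampleWorkerGroupLookup() bool {
	groups := []eksav1alpha1.WorkerNodeGroupConfiguration{
		{Name: "md-0"},
		{Name: "md-1"},
	}
	byName := BuildMapForWorkerNodeGroupsByName(groups)
	_, stillPresent := byName["md-0"]
	// NodeGroupsToDelete relies on this lookup: any group in the current spec whose
	// name is missing from the new spec's map is marked for deletion.
	return stillPresent
}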
| 27 |
eks-anywhere | aws | Go | package cluster_test
import (
"reflect"
"testing"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
func TestNodeGroupsToDelete(t *testing.T) {
tests := []struct {
name string
new, current *cluster.Spec
want []anywherev1.WorkerNodeGroupConfiguration
}{
{
name: "one worker node group, missing name, no changes",
current: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
}
}),
new: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "md-0",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
}
}),
want: []anywherev1.WorkerNodeGroupConfiguration{},
},
{
name: "one worker node group, missing name, new name is not default",
current: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
}
}),
new: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "worker-node-group-0",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
}
}),
want: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "md-0",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
},
},
{
name: "new added, some removed, some stay",
current: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "worker-node-group-0",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
{
Name: "worker-node-group-1",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
}
}),
new: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.WorkerNodeGroupConfigurations = []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "worker-node-group-0",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
{
Name: "worker-node-group-2",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
}
}),
want: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "worker-node-group-1",
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-config-1",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := cluster.NodeGroupsToDelete(tt.current, tt.new); !reflect.DeepEqual(got, tt.want) {
t.Errorf("NodeGroupsToDelete() = %v, want %v", got, tt.want)
}
})
}
}
| 133 |
eks-anywhere | aws | Go | package cluster
import (
"context"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func nutanixEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.NutanixDatacenterKind: func() APIObject {
return &anywherev1.NutanixDatacenterConfig{}
},
anywherev1.NutanixMachineConfigKind: func() APIObject {
return &anywherev1.NutanixMachineConfig{}
},
},
Processors: []ParsedProcessor{
processNutanixDatacenter,
machineConfigsProcessor(processNutanixMachineConfig),
},
Validations: []Validation{
func(c *Config) error {
if c.NutanixDatacenter != nil {
return c.NutanixDatacenter.Validate()
}
return nil
},
func(c *Config) error {
if c.NutanixDatacenter != nil {
if err := validateSameNamespace(c, c.NutanixDatacenter); err != nil {
return err
}
}
return nil
},
},
}
}
func processNutanixDatacenter(c *Config, objects ObjectLookup) {
if c.Cluster.Spec.DatacenterRef.Kind == anywherev1.NutanixDatacenterKind {
datacenter := objects.GetFromRef(c.Cluster.APIVersion, c.Cluster.Spec.DatacenterRef)
if datacenter != nil {
c.NutanixDatacenter = datacenter.(*anywherev1.NutanixDatacenterConfig)
}
}
}
func processNutanixMachineConfig(c *Config, objects ObjectLookup, machineRef *anywherev1.Ref) {
if machineRef == nil {
return
}
if machineRef.Kind != anywherev1.NutanixMachineConfigKind {
return
}
if c.NutanixMachineConfigs == nil {
c.NutanixMachineConfigs = map[string]*anywherev1.NutanixMachineConfig{}
}
m := objects.GetFromRef(c.Cluster.APIVersion, *machineRef)
if m == nil {
return
}
c.NutanixMachineConfigs[m.GetName()] = m.(*anywherev1.NutanixMachineConfig)
}
func getNutanixDatacenter(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.NutanixDatacenterKind {
return nil
}
datacenter := &anywherev1.NutanixDatacenterConfig{}
if err := client.Get(ctx, c.Cluster.Spec.DatacenterRef.Name, c.Cluster.Namespace, datacenter); err != nil {
return err
}
c.NutanixDatacenter = datacenter
return nil
}
func getNutanixMachineConfigs(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.NutanixDatacenterKind {
return nil
}
if c.NutanixMachineConfigs == nil {
c.NutanixMachineConfigs = map[string]*anywherev1.NutanixMachineConfig{}
}
for _, machineConfigRef := range c.Cluster.MachineConfigRefs() {
if machineConfigRef.Kind != anywherev1.NutanixMachineConfigKind {
continue
}
machineConfig := &anywherev1.NutanixMachineConfig{}
if err := client.Get(ctx, machineConfigRef.Name, c.Cluster.Namespace, machineConfig); err != nil {
return err
}
c.NutanixMachineConfigs[machineConfig.GetName()] = machineConfig
}
return nil
}
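// Illustrative sketch (not part of the original file): wiring the Nutanix getters into
// a ConfigClientBuilder so Build can fetch the datacenter and machine configs
// referenced by a cluster, as the package's default builder does. The client and
// cluster arguments are assumed to be supplied by the caller.
func exampleBuildNutanixConfig(ctx context.Context, client Client, cluster *anywherev1.Cluster) (*Config, error) {
	builder := NewConfigClientBuilder().Register(
		getNutanixDatacenter,
		getNutanixMachineConfigs,
	)
	return builder.Build(ctx, client, cluster)
}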
| 110 |
eks-anywhere | aws | Go | package cluster
import (
"context"
_ "embed"
"fmt"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
)
//go:embed testdata/nutanix/eksa-cluster.yaml
var nutanixClusterConfigSpec string
//go:embed testdata/nutanix/datacenterConfig.yaml
var nutanixDatacenterConfigSpec string
//go:embed testdata/nutanix/machineConfig.yaml
var nutanixMachineConfigSpec string
func TestValidateNutanixEntry(t *testing.T) {
clusterConf := &anywherev1.Cluster{}
err := yaml.Unmarshal([]byte(nutanixClusterConfigSpec), clusterConf)
require.NoError(t, err)
dcConf := &anywherev1.NutanixDatacenterConfig{}
err = yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf)
require.NoError(t, err)
machineConf := &anywherev1.NutanixMachineConfig{}
err = yaml.Unmarshal([]byte(nutanixMachineConfigSpec), machineConf)
require.NoError(t, err)
config := &Config{
Cluster: clusterConf,
NutanixDatacenter: dcConf,
NutanixMachineConfigs: map[string]*anywherev1.NutanixMachineConfig{
"eksa-unit-test": machineConf,
},
}
assert.Equal(t, config.NutanixMachineConfig("eksa-unit-test"), machineConf)
cm, err := NewDefaultConfigManager()
assert.NoError(t, err)
c, err := cm.Parse([]byte(nutanixClusterConfigSpec))
assert.NoError(t, err)
fmt.Println(c)
err = cm.Validate(config)
assert.NoError(t, err)
}
func TestNutanixConfigClientBuilder(t *testing.T) {
clusterConf := &anywherev1.Cluster{}
err := yaml.Unmarshal([]byte(nutanixClusterConfigSpec), clusterConf)
require.NoError(t, err)
expectedDCConf := &anywherev1.NutanixDatacenterConfig{}
err = yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), expectedDCConf)
require.NoError(t, err)
expectedMachineConf := &anywherev1.NutanixMachineConfig{}
err = yaml.Unmarshal([]byte(nutanixMachineConfigSpec), expectedMachineConf)
require.NoError(t, err)
ctrl := gomock.NewController(t)
m := mocks.NewMockClient(ctrl)
m.EXPECT().Get(gomock.Any(), clusterConf.Spec.DatacenterRef.Name, gomock.Any(), &anywherev1.NutanixDatacenterConfig{}).
DoAndReturn(func(ctx context.Context, name, namespace string, obj client.Object) error {
expectedDCConf.DeepCopyInto(obj.(*anywherev1.NutanixDatacenterConfig))
return nil
})
m.EXPECT().Get(gomock.Any(), clusterConf.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, gomock.Any(), &anywherev1.NutanixMachineConfig{}).
DoAndReturn(func(ctx context.Context, name, namespace string, obj client.Object) error {
expectedMachineConf.DeepCopyInto(obj.(*anywherev1.NutanixMachineConfig))
return nil
})
ccb := NewConfigClientBuilder().Register(
getNutanixDatacenter,
getNutanixMachineConfigs,
)
conf, err := ccb.Build(context.TODO(), m, clusterConf)
assert.NoError(t, err)
assert.NotNil(t, conf)
assert.Equal(t, expectedDCConf, conf.NutanixDatacenter)
assert.Equal(t, expectedMachineConf, conf.NutanixMachineConfig(clusterConf.Spec.ControlPlaneConfiguration.MachineGroupRef.Name))
}
| 100 |
eks-anywhere | aws | Go | package cluster
import (
"context"
apierrors "k8s.io/apimachinery/pkg/api/errors"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func oidcEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.OIDCConfigKind: func() APIObject {
return &anywherev1.OIDCConfig{}
},
},
Processors: []ParsedProcessor{processOIDC},
Validations: []Validation{
func(c *Config) error {
for _, o := range c.OIDCConfigs {
if errs := o.Validate(); len(errs) != 0 {
return apierrors.NewInvalid(anywherev1.GroupVersion.WithKind(anywherev1.OIDCConfigKind).GroupKind(), o.Name, errs)
}
}
return nil
},
func(c *Config) error {
for _, o := range c.OIDCConfigs {
if err := validateSameNamespace(c, o); err != nil {
return err
}
}
return nil
},
},
}
}
func processOIDC(c *Config, objects ObjectLookup) {
if c.OIDCConfigs == nil {
c.OIDCConfigs = map[string]*anywherev1.OIDCConfig{}
}
for _, idr := range c.Cluster.Spec.IdentityProviderRefs {
idp := objects.GetFromRef(c.Cluster.APIVersion, idr)
if idp == nil {
return
}
if idr.Kind == anywherev1.OIDCConfigKind {
c.OIDCConfigs[idp.GetName()] = idp.(*anywherev1.OIDCConfig)
}
}
}
func getOIDC(ctx context.Context, client Client, c *Config) error {
if c.OIDCConfigs == nil {
c.OIDCConfigs = map[string]*anywherev1.OIDCConfig{}
}
for _, idr := range c.Cluster.Spec.IdentityProviderRefs {
if idr.Kind == anywherev1.OIDCConfigKind {
oidc := &anywherev1.OIDCConfig{}
if err := client.Get(ctx, idr.Name, c.Cluster.Namespace, oidc); err != nil {
return err
}
c.OIDCConfigs[oidc.Name] = oidc
}
}
return nil
}
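// Illustrative sketch (not part of the original file): running the validations declared
// by oidcEntry over an already-processed Config. The default config manager runs these
// as part of its Validate step; the direct loop here is only for illustration.
func exampleRunOIDCValidations(c *Config) error {
	for _, validate := range oidcEntry().Validations {
		if err := validate(c); err != nil {
			return err
		}
	}
	return nil
}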
| 73 |
eks-anywhere | aws | Go | package cluster_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
)
func TestDefaultConfigClientBuilderOIDC(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
IdentityProviderRefs: []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "my-oidc",
},
},
},
}
oidcConfig := &anywherev1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-oidc",
Namespace: "default",
},
}
client.EXPECT().Get(ctx, "my-oidc", "default", &anywherev1.OIDCConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
c := obj.(*anywherev1.OIDCConfig)
c.ObjectMeta = oidcConfig.ObjectMeta
return nil
},
)
config, err := b.Build(ctx, client, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(config).NotTo(BeNil())
g.Expect(config.Cluster).To(Equal(cluster))
g.Expect(len(config.OIDCConfigs)).To(Equal(1))
g.Expect(config.OIDCConfigs["my-oidc"]).To(Equal(oidcConfig))
}
func TestConfigManagerValidateOIDCConfigSuccess(t *testing.T) {
g := NewWithT(t)
c := clusterConfigFromFile(t, "testdata/docker_cluster_oidc_awsiam_flux.yaml")
m, err := cluster.NewDefaultConfigManager()
g.Expect(err).To(BeNil())
err = m.Validate(c)
g.Expect(err).To(Succeed())
}
func TestConfigManagerValidateOIDCConfigMultipleErrors(t *testing.T) {
g := NewWithT(t)
c := clusterConfigFromFile(t, "testdata/docker_cluster_oidc_awsiam_flux.yaml")
c.OIDCConfigs["eksa-unit-test"] = &anywherev1.OIDCConfig{
Spec: anywherev1.OIDCConfigSpec{
ClientId: "",
},
}
m, err := cluster.NewDefaultConfigManager()
g.Expect(err).To(BeNil())
err = m.Validate(c)
g.Expect(err).To(MatchError(ContainSubstring("clientId is required")))
}
| 84 |
eks-anywhere | aws | Go | package cluster
import (
"fmt"
"os"
)
// ParseConfigFromFile reads a yaml file with at least one Cluster object and generates the corresponding Config
// using the default package config manager.
func ParseConfigFromFile(path string) (*Config, error) {
content, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("reading cluster config file: %v", err)
}
return ParseConfig(content)
}
// ParseConfig reads yaml manifest with at least one Cluster object and generates the corresponding Config
// using the default package config manager.
func ParseConfig(yamlManifest []byte) (*Config, error) {
return manager().Parse(yamlManifest)
}
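// Illustrative sketch (not part of the original file): parsing a cluster spec file into
// a Config. The file path is hypothetical; per the doc comments above, the manifest
// must contain at least one Cluster object.
func exampleParseClusterSpec() (*Config, error) {
	config, err := ParseConfigFromFile("cluster.yaml")
	if err != nil {
		return nil, fmt.Errorf("parsing example spec: %v", err)
	}
	// config.Cluster is always set; provider and add-on objects (e.g. FluxConfig,
	// OIDCConfigs) are filled in when present in the manifest.
	return config, nil
}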
| 24 |
eks-anywhere | aws | Go | package cluster_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestParseConfig(t *testing.T) {
tests := []struct {
name string
yamlManifest []byte
wantCluster *anywherev1.Cluster
wantCloudStackDatacenter *anywherev1.CloudStackDatacenterConfig
wantVsphereDatacenter *anywherev1.VSphereDatacenterConfig
wantDockerDatacenter *anywherev1.DockerDatacenterConfig
wantVsphereMachineConfigs []*anywherev1.VSphereMachineConfig
wantCloudStackMachineConfigs []*anywherev1.CloudStackMachineConfig
wantOIDCConfigs []*anywherev1.OIDCConfig
wantAWSIamConfigs []*anywherev1.AWSIamConfig
wantGitOpsConfig *anywherev1.GitOpsConfig
wantFluxConfig *anywherev1.FluxConfig
}{
{
name: "vsphere cluster",
yamlManifest: []byte(test.ReadFile(t, "testdata/cluster_1_19.yaml")),
wantCluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.19",
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{Host: "myHostIp"},
MachineGroupRef: &anywherev1.Ref{
Kind: "VSphereMachineConfig",
Name: "eksa-unit-test-cp",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "workers-1",
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: "VSphereMachineConfig",
Name: "eksa-unit-test",
},
},
},
DatacenterRef: anywherev1.Ref{
Kind: "VSphereDatacenterConfig",
Name: "eksa-unit-test",
},
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
},
},
wantVsphereDatacenter: &anywherev1.VSphereDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.VSphereDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.VSphereDatacenterConfigSpec{
Datacenter: "myDatacenter",
Network: "/myDatacenter/network-1",
Server: "myServer",
Thumbprint: "myTlsThumbprint",
Insecure: false,
},
},
wantVsphereMachineConfigs: []*anywherev1.VSphereMachineConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.VSphereMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-cp",
},
Spec: anywherev1.VSphereMachineConfigSpec{
Datastore: "myDatastore",
DiskGiB: 25,
MemoryMiB: 8192,
NumCPUs: 2,
OSFamily: anywherev1.Ubuntu,
ResourcePool: "myResourcePool",
Users: []anywherev1.UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.VSphereMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.VSphereMachineConfigSpec{
Datastore: "myDatastore",
DiskGiB: 25,
MemoryMiB: 8192,
NumCPUs: 2,
OSFamily: anywherev1.Ubuntu,
ResourcePool: "myResourcePool",
Users: []anywherev1.UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
},
},
},
},
{
name: "cloudstack cluster",
yamlManifest: []byte(test.ReadFile(t, "testdata/cluster_1_20_cloudstack.yaml")),
wantCluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.20",
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 3,
Endpoint: &anywherev1.Endpoint{Host: "test-ip"},
MachineGroupRef: &anywherev1.Ref{
Kind: "CloudStackMachineConfig",
Name: "eksa-unit-test",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(3),
MachineGroupRef: &anywherev1.Ref{
Kind: "CloudStackMachineConfig",
Name: "eksa-unit-test",
},
},
},
DatacenterRef: anywherev1.Ref{
Kind: "CloudStackDatacenterConfig",
Name: "eksa-unit-test",
},
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
},
},
wantCloudStackDatacenter: &anywherev1.CloudStackDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.CloudStackDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.CloudStackDatacenterConfigSpec{
Account: "admin",
Domain: "domain1",
Zones: []anywherev1.CloudStackZone{
{
Name: "zone1",
Network: anywherev1.CloudStackResourceIdentifier{
Name: "net1",
},
},
},
ManagementApiEndpoint: "https://127.0.0.1:8080/client/api",
},
},
wantCloudStackMachineConfigs: []*anywherev1.CloudStackMachineConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.CloudStackMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.CloudStackMachineConfigSpec{
ComputeOffering: anywherev1.CloudStackResourceIdentifier{
Name: "m4-large",
},
Template: anywherev1.CloudStackResourceIdentifier{
Name: "centos7-k8s-120",
},
Users: []anywherev1.UserConfiguration{{
Name: "mySshUsername",
SshAuthorizedKeys: []string{"mySshAuthorizedKey"},
}},
Affinity: "pro",
UserCustomDetails: map[string]string{"foo": "bar"},
Symlinks: map[string]string{"/var/log/kubernetes": "/data/var/log/kubernetes"},
},
},
},
},
{
name: "docker cluster with oidc, awsiam and gitops",
yamlManifest: []byte(test.ReadFile(t, "testdata/docker_cluster_oidc_awsiam_gitops.yaml")),
wantCluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "m-docker",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.21",
ManagementCluster: anywherev1.ManagementCluster{
Name: "m-docker",
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "workers-1",
Count: ptr.Int(1),
},
},
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.DockerDatacenterKind,
Name: "m-docker",
},
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
IdentityProviderRefs: []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "eksa-unit-test",
},
{
Kind: anywherev1.AWSIamConfigKind,
Name: "eksa-unit-test",
},
},
GitOpsRef: &anywherev1.Ref{
Kind: anywherev1.GitOpsConfigKind,
Name: "eksa-unit-test",
},
},
},
wantDockerDatacenter: &anywherev1.DockerDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.DockerDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "m-docker",
},
},
wantGitOpsConfig: &anywherev1.GitOpsConfig{
TypeMeta: metav1.TypeMeta{
Kind: "GitOpsConfig",
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.GitOpsConfigSpec{
Flux: anywherev1.Flux{
Github: anywherev1.Github{
Owner: "janedoe",
Repository: "flux-fleet",
},
},
},
},
wantFluxConfig: &anywherev1.FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: "FluxConfig",
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.FluxConfigSpec{
Github: &anywherev1.GithubProviderConfig{
Owner: "janedoe",
Repository: "flux-fleet",
},
},
},
wantOIDCConfigs: []*anywherev1.OIDCConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: "OIDCConfig",
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.OIDCConfigSpec{
ClientId: "id12",
GroupsClaim: "claim1",
GroupsPrefix: "prefix-for-groups",
IssuerUrl: "https://mydomain.com/issuer",
RequiredClaims: []anywherev1.OIDCConfigRequiredClaim{
{
Claim: "sub",
Value: "test",
},
},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
},
},
wantAWSIamConfigs: []*anywherev1.AWSIamConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.AWSIamConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.AWSIamConfigSpec{
AWSRegion: "test-region",
BackendMode: []string{"mode1", "mode2"},
MapRoles: []anywherev1.MapRoles{
{
RoleARN: "test-role-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
MapUsers: []anywherev1.MapUsers{
{
UserARN: "test-user-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
Partition: "aws",
},
},
},
},
{
name: "docker cluster with oidc, awsiam and flux",
yamlManifest: []byte(test.ReadFile(t, "testdata/docker_cluster_oidc_awsiam_flux.yaml")),
wantCluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "m-docker",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.21",
ManagementCluster: anywherev1.ManagementCluster{
Name: "m-docker",
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "workers-1",
Count: ptr.Int(1),
},
},
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.DockerDatacenterKind,
Name: "m-docker",
},
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
IdentityProviderRefs: []anywherev1.Ref{
{
Kind: anywherev1.OIDCConfigKind,
Name: "eksa-unit-test",
},
{
Kind: anywherev1.AWSIamConfigKind,
Name: "eksa-unit-test",
},
},
GitOpsRef: &anywherev1.Ref{
Kind: anywherev1.FluxConfigKind,
Name: "eksa-unit-test",
},
},
},
wantDockerDatacenter: &anywherev1.DockerDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.DockerDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "m-docker",
},
},
wantFluxConfig: &anywherev1.FluxConfig{
TypeMeta: metav1.TypeMeta{
Kind: "FluxConfig",
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.FluxConfigSpec{
Github: &anywherev1.GithubProviderConfig{
Owner: "janedoe",
Repository: "flux-fleet",
},
},
},
wantOIDCConfigs: []*anywherev1.OIDCConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: "OIDCConfig",
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.OIDCConfigSpec{
ClientId: "id12",
GroupsClaim: "claim1",
GroupsPrefix: "prefix-for-groups",
IssuerUrl: "https://mydomain.com/issuer",
RequiredClaims: []anywherev1.OIDCConfigRequiredClaim{
{
Claim: "sub",
Value: "test",
},
},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
},
},
wantAWSIamConfigs: []*anywherev1.AWSIamConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.AWSIamConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.AWSIamConfigSpec{
AWSRegion: "test-region",
BackendMode: []string{"mode1", "mode2"},
MapRoles: []anywherev1.MapRoles{
{
RoleARN: "test-role-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
MapUsers: []anywherev1.MapUsers{
{
UserARN: "test-user-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
Partition: "aws",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := cluster.ParseConfig(tt.yamlManifest)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(got.Cluster).To(Equal(tt.wantCluster))
g.Expect(got.VSphereDatacenter).To(Equal(tt.wantVsphereDatacenter))
g.Expect(got.DockerDatacenter).To(Equal(tt.wantDockerDatacenter))
g.Expect(len(got.VSphereMachineConfigs)).To(Equal(len(tt.wantVsphereMachineConfigs)), "it should return the right number of VSphereMachineConfigs")
for _, m := range tt.wantVsphereMachineConfigs {
g.Expect(got.VsphereMachineConfig(m.Name)).To(Equal(m))
}
g.Expect(len(got.OIDCConfigs)).To(Equal(len(tt.wantOIDCConfigs)), "it should return the right number of OIDCConfigs")
for _, o := range tt.wantOIDCConfigs {
g.Expect(got.OIDCConfig(o.Name)).To(Equal(o))
}
g.Expect(len(got.AWSIAMConfigs)).To(Equal(len(tt.wantAWSIamConfigs)), "it should return the right number of AWSIAMConfigs")
for _, a := range tt.wantAWSIamConfigs {
g.Expect(got.AWSIamConfig(a.Name)).To(Equal(a))
}
g.Expect(got.GitOpsConfig).To(Equal(tt.wantGitOpsConfig))
g.Expect(got.FluxConfig).To(Equal(tt.wantFluxConfig))
})
}
}
func TestParseConfigForSnow(t *testing.T) {
tests := []struct {
name string
yamlManifest []byte
wantCluster *anywherev1.Cluster
wantSnowDatacenter *anywherev1.SnowDatacenterConfig
wantSnowMachineConfigs []*anywherev1.SnowMachineConfig
wantSnowIPPools []*anywherev1.SnowIPPool
}{
{
name: "snow cluster basic",
yamlManifest: []byte(test.ReadFile(t, "testdata/cluster_snow_1_21.yaml")),
wantCluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.21",
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{Host: "myHostIp"},
MachineGroupRef: &anywherev1.Ref{
Kind: "SnowMachineConfig",
Name: "eksa-unit-test-cp",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "workers-1",
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: "SnowMachineConfig",
Name: "eksa-unit-test",
},
},
},
DatacenterRef: anywherev1.Ref{
Kind: "SnowDatacenterConfig",
Name: "eksa-unit-test",
},
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
},
},
wantSnowDatacenter: &anywherev1.SnowDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.SnowDatacenterConfigSpec{},
},
wantSnowMachineConfigs: []*anywherev1.SnowMachineConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-cp",
},
Spec: anywherev1.SnowMachineConfigSpec{
AMIID: "eks-d-v1-21-ami",
InstanceType: "sbe-c.large",
SshKeyName: "default",
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.SnowMachineConfigSpec{
AMIID: "eks-d-v1-21-ami",
InstanceType: "sbe-c.xlarge",
SshKeyName: "default",
},
},
},
},
{
name: "snow cluster with ip pool",
yamlManifest: []byte(test.ReadFile(t, "testdata/cluster_snow_with_ip_pool.yaml")),
wantCluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.21",
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{Host: "myHostIp"},
MachineGroupRef: &anywherev1.Ref{
Kind: "SnowMachineConfig",
Name: "eksa-unit-test-cp",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Name: "workers-1",
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: "SnowMachineConfig",
Name: "eksa-unit-test-worker-1",
},
},
{
Name: "workers-2",
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: "SnowMachineConfig",
Name: "eksa-unit-test-worker-2",
},
},
},
DatacenterRef: anywherev1.Ref{
Kind: "SnowDatacenterConfig",
Name: "eksa-unit-test",
},
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
},
},
wantSnowDatacenter: &anywherev1.SnowDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: anywherev1.SnowDatacenterConfigSpec{},
},
wantSnowMachineConfigs: []*anywherev1.SnowMachineConfig{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-cp",
},
Spec: anywherev1.SnowMachineConfigSpec{
AMIID: "eks-d-v1-21-ami",
InstanceType: "sbe-c.large",
SshKeyName: "default",
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-1",
},
Primary: true,
},
{
Index: 2,
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-2",
},
Primary: false,
},
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-worker-1",
},
Spec: anywherev1.SnowMachineConfigSpec{
AMIID: "eks-d-v1-21-ami",
InstanceType: "sbe-c.xlarge",
SshKeyName: "default",
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-1",
},
Primary: true,
},
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowMachineConfigKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test-worker-2",
},
Spec: anywherev1.SnowMachineConfigSpec{
AMIID: "eks-d-v1-21-ami",
InstanceType: "sbe-c.xlarge",
SshKeyName: "default",
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
},
},
},
wantSnowIPPools: []*anywherev1.SnowIPPool{
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowIPPoolKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "ip-pool-1",
},
Spec: anywherev1.SnowIPPoolSpec{
Pools: []anywherev1.IPPool{
{
IPStart: "start-1",
IPEnd: "end-1",
Subnet: "subnet-1",
Gateway: "gateway-1",
},
},
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.SnowIPPoolKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "ip-pool-2",
},
Spec: anywherev1.SnowIPPoolSpec{
Pools: []anywherev1.IPPool{
{
IPStart: "start-2",
IPEnd: "end-2",
Subnet: "subnet-2",
Gateway: "gateway-2",
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := cluster.ParseConfig(tt.yamlManifest)
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(got.Cluster).To(Equal(tt.wantCluster))
g.Expect(got.SnowDatacenter).To(Equal(tt.wantSnowDatacenter))
g.Expect(len(got.SnowMachineConfigs)).To(Equal(len(tt.wantSnowMachineConfigs)), "it should return the right number of SnowMachineConfigs")
for _, m := range tt.wantSnowMachineConfigs {
g.Expect(got.SnowMachineConfig(m.Name)).To(Equal(m))
}
for _, p := range tt.wantSnowIPPools {
g.Expect(got.SnowIPPool(p.Name)).To(Equal(p))
}
})
}
}
| 851 |
eks-anywhere | aws | Go | package cluster
import (
"context"
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func snowEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.SnowDatacenterKind: func() APIObject {
return &anywherev1.SnowDatacenterConfig{}
},
anywherev1.SnowMachineConfigKind: func() APIObject {
return &anywherev1.SnowMachineConfig{}
},
anywherev1.SnowIPPoolKind: func() APIObject {
return &anywherev1.SnowIPPool{}
},
},
Processors: []ParsedProcessor{
processSnowDatacenter,
machineConfigsProcessor(processSnowMachineConfig),
snowIPPoolsProcessor,
},
Defaulters: []Defaulter{
func(c *Config) error {
if c.SnowDatacenter != nil {
SetSnowDatacenterIndentityRefDefault(c.SnowDatacenter)
}
return nil
},
func(c *Config) error {
for _, m := range c.SnowMachineConfigs {
m.SetDefaults()
}
return nil
},
SetSnowMachineConfigsAnnotations,
},
Validations: []Validation{
func(c *Config) error {
if c.SnowDatacenter != nil {
return c.SnowDatacenter.Validate()
}
return nil
},
func(c *Config) error {
for _, m := range c.SnowMachineConfigs {
if err := m.Validate(); err != nil {
return err
}
}
return nil
},
func(c *Config) error {
for _, p := range c.SnowIPPools {
if err := p.Validate(); err != nil {
return err
}
}
return nil
},
func(c *Config) error {
if c.SnowDatacenter != nil {
if err := validateSameNamespace(c, c.SnowDatacenter); err != nil {
return err
}
}
return nil
},
func(c *Config) error {
for _, v := range c.SnowMachineConfigs {
if err := validateSameNamespace(c, v); err != nil {
return err
}
}
return nil
},
func(c *Config) error {
return ValidateSnowMachineRefExists(c)
},
func(c *Config) error {
return validateSnowUnstackedEtcd(c)
},
},
}
}
func processSnowDatacenter(c *Config, objects ObjectLookup) {
if c.Cluster.Spec.DatacenterRef.Kind == anywherev1.SnowDatacenterKind {
datacenter := objects.GetFromRef(c.Cluster.APIVersion, c.Cluster.Spec.DatacenterRef)
if datacenter != nil {
c.SnowDatacenter = datacenter.(*anywherev1.SnowDatacenterConfig)
}
}
}
func snowIPPoolsProcessor(c *Config, o ObjectLookup) {
for _, m := range c.SnowMachineConfigs {
for _, pool := range m.IPPoolRefs() {
processSnowIPPool(c, o, &pool)
}
}
}
func processSnowIPPool(c *Config, objects ObjectLookup, ipPoolRef *anywherev1.Ref) {
if ipPoolRef == nil {
return
}
if ipPoolRef.Kind != anywherev1.SnowIPPoolKind {
return
}
if c.SnowIPPools == nil {
c.SnowIPPools = map[string]*anywherev1.SnowIPPool{}
}
p := objects.GetFromRef(c.Cluster.APIVersion, *ipPoolRef)
if p == nil {
return
}
c.SnowIPPools[p.GetName()] = p.(*anywherev1.SnowIPPool)
}
func processSnowMachineConfig(c *Config, objects ObjectLookup, machineRef *anywherev1.Ref) {
if machineRef == nil {
return
}
if machineRef.Kind != anywherev1.SnowMachineConfigKind {
return
}
if c.SnowMachineConfigs == nil {
c.SnowMachineConfigs = map[string]*anywherev1.SnowMachineConfig{}
}
m := objects.GetFromRef(c.Cluster.APIVersion, *machineRef)
if m == nil {
return
}
c.SnowMachineConfigs[m.GetName()] = m.(*anywherev1.SnowMachineConfig)
}
func SetSnowMachineConfigsAnnotations(c *Config) error {
if c.SnowMachineConfigs == nil {
return nil
}
c.SnowMachineConfigs[c.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].SetControlPlaneAnnotation()
if c.Cluster.Spec.ExternalEtcdConfiguration != nil {
c.SnowMachineConfigs[c.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].SetEtcdAnnotation()
}
if c.Cluster.IsManaged() {
for _, mc := range c.SnowMachineConfigs {
mc.SetManagedBy(c.Cluster.ManagedBy())
}
}
return nil
}
func getSnowDatacenter(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.SnowDatacenterKind {
return nil
}
datacenter := &anywherev1.SnowDatacenterConfig{}
if err := client.Get(ctx, c.Cluster.Spec.DatacenterRef.Name, c.Cluster.Namespace, datacenter); err != nil {
return err
}
c.SnowDatacenter = datacenter
return nil
}
func getSnowMachineConfigsAndIPPools(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.SnowDatacenterKind {
return nil
}
if c.SnowMachineConfigs == nil {
c.SnowMachineConfigs = map[string]*anywherev1.SnowMachineConfig{}
}
for _, machineRef := range c.Cluster.MachineConfigRefs() {
if machineRef.Kind != anywherev1.SnowMachineConfigKind {
continue
}
machine := &anywherev1.SnowMachineConfig{}
if err := client.Get(ctx, machineRef.Name, c.Cluster.Namespace, machine); err != nil {
return err
}
c.SnowMachineConfigs[machine.Name] = machine
if err := getSnowIPPools(ctx, client, c, machine); err != nil {
return err
}
}
return nil
}
func getSnowIPPools(ctx context.Context, client Client, c *Config, machine *anywherev1.SnowMachineConfig) error {
if c.SnowIPPools == nil {
c.SnowIPPools = map[string]*anywherev1.SnowIPPool{}
}
for _, dni := range machine.Spec.Network.DirectNetworkInterfaces {
if dni.IPPoolRef == nil {
continue
}
if _, ok := c.SnowIPPools[dni.IPPoolRef.Name]; ok {
continue
}
pool := &anywherev1.SnowIPPool{}
if err := client.Get(ctx, dni.IPPoolRef.Name, c.Cluster.Namespace, pool); err != nil {
return err
}
c.SnowIPPools[pool.Name] = pool
}
return nil
}
func getSnowIdentitySecret(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.SnowDatacenterKind {
return nil
}
if c.SnowDatacenter == nil {
return errors.New("snow datacenter has to be processed before snow identityRef credentials secret")
}
secret := &corev1.Secret{}
if err := client.Get(ctx, c.SnowDatacenter.Spec.IdentityRef.Name, c.Cluster.Namespace, secret); err != nil {
return err
}
c.SnowCredentialsSecret = secret
return nil
}
// SetSnowDatacenterIndentityRefDefault sets a default secret as the identity reference.
// The secret will need to be created by the CLI flow, as it's not provided by the user.
// This only runs in CLI. snowDatacenterConfig.SetDefaults() will run in both CLI and webhook.
func SetSnowDatacenterIndentityRefDefault(s *anywherev1.SnowDatacenterConfig) {
if len(s.Spec.IdentityRef.Kind) == 0 && len(s.Spec.IdentityRef.Name) == 0 {
s.Spec.IdentityRef = anywherev1.Ref{
Kind: anywherev1.SnowIdentityKind,
Name: fmt.Sprintf("%s-snow-credentials", s.GetName()),
}
}
}
// ValidateSnowMachineRefExists checks the cluster spec machine refs and makes sure
// the snowmachineconfig object exists for each ref with kind == snowmachineconfig.
func ValidateSnowMachineRefExists(c *Config) error {
for _, machineRef := range c.Cluster.MachineConfigRefs() {
if machineRef.Kind == anywherev1.SnowMachineConfigKind && c.SnowMachineConfig(machineRef.Name) == nil {
return fmt.Errorf("unable to find SnowMachineConfig %s", machineRef.Name)
}
}
return nil
}
func validateSnowUnstackedEtcd(c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.SnowDatacenterKind {
return nil
}
if c.Cluster.Spec.ExternalEtcdConfiguration == nil || c.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef == nil {
return nil
}
mc := c.SnowMachineConfig(c.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name)
for _, dni := range mc.Spec.Network.DirectNetworkInterfaces {
if dni.DHCP {
return errors.New("creating unstacked etcd machine with DHCP is not supported for snow. Please use static IP for DNI configuration")
}
if dni.IPPoolRef == nil {
return errors.New("snow machine config ip pool must be specified when using static IP")
}
}
return nil
}
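// Illustrative sketch (not part of the original file): the identityRef default applied
// by SetSnowDatacenterIndentityRefDefault. The datacenter name is made up.
func exampleSnowIdentityRefDefault() anywherev1.Ref {
	dc := &anywherev1.SnowDatacenterConfig{}
	dc.Name = "snow-dc"
	SetSnowDatacenterIndentityRefDefault(dc)
	// With no identityRef provided, the default points at a secret named
	// "snow-dc-snow-credentials" of kind SnowIdentityKind, which the CLI creates.
	return dc.Spec.IdentityRef
}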
| 304 |
eks-anywhere | aws | Go | package cluster_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
)
func TestSetSnowMachineConfigsAnnotations(t *testing.T) {
tests := []struct {
name string
config *cluster.Config
wantSnowMachineConfigs map[string]*anywherev1.SnowMachineConfig
}{
{
name: "workload cluster with external etcd",
config: &cluster.Config{
Cluster: &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
Spec: anywherev1.ClusterSpec{
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
MachineGroupRef: &anywherev1.Ref{
Name: "cp-machine",
},
},
ExternalEtcdConfiguration: &anywherev1.ExternalEtcdConfiguration{
MachineGroupRef: &anywherev1.Ref{
Name: "etcd-machine",
},
},
ManagementCluster: anywherev1.ManagementCluster{
Name: "mgmt-cluster",
},
},
},
SnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"cp-machine": {
ObjectMeta: metav1.ObjectMeta{
Name: "cp-machine",
},
},
"etcd-machine": {
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-machine",
},
},
},
},
wantSnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"cp-machine": {
ObjectMeta: metav1.ObjectMeta{
Name: "cp-machine",
Annotations: map[string]string{
"anywhere.eks.amazonaws.com/control-plane": "true",
"anywhere.eks.amazonaws.com/managed-by": "mgmt-cluster",
},
},
},
"etcd-machine": {
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-machine",
Annotations: map[string]string{
"anywhere.eks.amazonaws.com/etcd": "true",
"anywhere.eks.amazonaws.com/managed-by": "mgmt-cluster",
},
},
},
},
},
{
name: "management cluster",
config: &cluster.Config{
Cluster: &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
},
Spec: anywherev1.ClusterSpec{
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
MachineGroupRef: &anywherev1.Ref{
Name: "cp-machine",
},
},
},
},
SnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"cp-machine": {
ObjectMeta: metav1.ObjectMeta{
Name: "cp-machine",
},
},
},
},
wantSnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"cp-machine": {
ObjectMeta: metav1.ObjectMeta{
Name: "cp-machine",
Annotations: map[string]string{
"anywhere.eks.amazonaws.com/control-plane": "true",
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
err := cluster.SetSnowMachineConfigsAnnotations(tt.config)
g.Expect(err).To(Succeed())
g.Expect(tt.config.SnowMachineConfigs).To(Equal(tt.wantSnowMachineConfigs))
})
}
}
func TestDefaultConfigClientBuilderSnowCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.SnowDatacenterKind,
Name: "datacenter",
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.SnowMachineConfigKind,
Name: "machine-1",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.SnowMachineConfigKind,
Name: "machine-2",
},
},
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-3",
},
},
},
},
}
secret := &corev1.Secret{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Secret",
},
ObjectMeta: metav1.ObjectMeta{
Name: "snow-secret",
Namespace: "default",
},
Data: map[string][]byte{
"credentials": []byte("creds"),
"ca-bundle": []byte("certs"),
},
Type: "Opaque",
}
datacenter := &anywherev1.SnowDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "datacenter",
Namespace: "default",
},
Spec: anywherev1.SnowDatacenterConfigSpec{
IdentityRef: anywherev1.Ref{
Kind: "Secret",
Name: secret.Name,
},
},
}
machineControlPlane := &anywherev1.SnowMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-1",
Namespace: "default",
},
Spec: anywherev1.SnowMachineConfigSpec{
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-1",
},
Primary: true,
},
{
Index: 2,
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-2",
},
Primary: false,
},
{
Index: 3,
Primary: false,
},
},
},
},
}
machineWorker := &anywherev1.SnowMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-2",
Namespace: "default",
},
Spec: anywherev1.SnowMachineConfigSpec{
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-1",
},
Primary: true,
},
},
},
},
}
pool1 := &anywherev1.SnowIPPool{
ObjectMeta: metav1.ObjectMeta{
Name: "ip-pool-1",
},
Spec: anywherev1.SnowIPPoolSpec{
Pools: []anywherev1.IPPool{
{
IPStart: "start-1",
IPEnd: "end-1",
Subnet: "subnet-1",
Gateway: "gateway-1",
},
},
},
}
pool2 := &anywherev1.SnowIPPool{
ObjectMeta: metav1.ObjectMeta{
Name: "ip-pool-2",
},
Spec: anywherev1.SnowIPPoolSpec{
Pools: []anywherev1.IPPool{
{
IPStart: "start-2",
IPEnd: "end-2",
Subnet: "subnet-2",
Gateway: "gateway-2",
},
},
},
}
client.EXPECT().Get(ctx, "datacenter", "default", &anywherev1.SnowDatacenterConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
d := obj.(*anywherev1.SnowDatacenterConfig)
d.ObjectMeta = datacenter.ObjectMeta
d.Spec = datacenter.Spec
return nil
},
)
client.EXPECT().Get(ctx, "machine-1", "default", &anywherev1.SnowMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.SnowMachineConfig)
m.ObjectMeta = machineControlPlane.ObjectMeta
m.Spec = machineControlPlane.Spec
return nil
},
)
client.EXPECT().Get(ctx, "ip-pool-1", "default", &anywherev1.SnowIPPool{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
p := obj.(*anywherev1.SnowIPPool)
p.ObjectMeta = pool1.ObjectMeta
p.Spec = pool1.Spec
return nil
},
)
client.EXPECT().Get(ctx, "ip-pool-2", "default", &anywherev1.SnowIPPool{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
p := obj.(*anywherev1.SnowIPPool)
p.ObjectMeta = pool2.ObjectMeta
p.Spec = pool2.Spec
return nil
},
)
client.EXPECT().Get(ctx, secret.Name, "default", &corev1.Secret{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
d := obj.(*corev1.Secret)
d.ObjectMeta = secret.ObjectMeta
d.TypeMeta = secret.TypeMeta
d.Data = secret.Data
d.Type = secret.Type
return nil
},
)
client.EXPECT().Get(ctx, "machine-2", "default", &anywherev1.SnowMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.SnowMachineConfig)
m.ObjectMeta = machineWorker.ObjectMeta
m.Spec = machineWorker.Spec
return nil
},
)
config, err := b.Build(ctx, client, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(config).NotTo(BeNil())
g.Expect(config.Cluster).To(Equal(cluster))
g.Expect(config.SnowDatacenter).To(Equal(datacenter))
g.Expect(len(config.SnowMachineConfigs)).To(Equal(2))
g.Expect(config.SnowMachineConfigs["machine-1"]).To(Equal(machineControlPlane))
g.Expect(config.SnowMachineConfigs["machine-2"]).To(Equal(machineWorker))
g.Expect(config.SnowCredentialsSecret).To(Equal(secret))
g.Expect(config.SnowIPPools["ip-pool-1"]).To(Equal(pool1))
g.Expect(config.SnowIPPools["ip-pool-2"]).To(Equal(pool2))
}
func TestDefaultConfigClientBuilderSnowClusterGetIPPoolError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.SnowDatacenterKind,
Name: "datacenter",
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.SnowMachineConfigKind,
Name: "machine-1",
},
},
},
}
machineControlPlane := &anywherev1.SnowMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-1",
Namespace: "default",
},
Spec: anywherev1.SnowMachineConfigSpec{
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
IPPoolRef: &anywherev1.Ref{
Kind: anywherev1.SnowIPPoolKind,
Name: "ip-pool-1",
},
},
},
},
},
}
client.EXPECT().Get(ctx, "datacenter", "default", &anywherev1.SnowDatacenterConfig{}).Return(nil)
client.EXPECT().Get(ctx, "machine-1", "default", &anywherev1.SnowMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.SnowMachineConfig)
m.ObjectMeta = machineControlPlane.ObjectMeta
m.Spec = machineControlPlane.Spec
return nil
},
)
client.EXPECT().Get(ctx, "ip-pool-1", "default", &anywherev1.SnowIPPool{}).Return(errors.New("error get ip pool"))
config, err := b.Build(ctx, client, cluster)
g.Expect(err).To(MatchError(ContainSubstring("error get ip pool")))
g.Expect(config).To(BeNil())
}
func TestParseConfigMissingSnowDatacenter(t *testing.T) {
g := NewWithT(t)
got, err := cluster.ParseConfigFromFile("testdata/cluster_snow_missing_datacenter.yaml")
g.Expect(err).To(Not(HaveOccurred()))
	g.Expect(got.SnowDatacenter).To(BeNil())
}
func TestSetSnowDatacenterIndentityRefDefault(t *testing.T) {
tests := []struct {
name string
before *anywherev1.SnowDatacenterConfig
after *anywherev1.SnowDatacenterConfig
}{
{
name: "identity ref empty",
before: &anywherev1.SnowDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: anywherev1.SnowDatacenterConfigSpec{},
},
after: &anywherev1.SnowDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: anywherev1.SnowDatacenterConfigSpec{
IdentityRef: anywherev1.Ref{
Name: "test-snow-credentials",
Kind: "Secret",
},
},
},
},
{
name: "identity ref exists",
before: &anywherev1.SnowDatacenterConfig{
Spec: anywherev1.SnowDatacenterConfigSpec{
IdentityRef: anywherev1.Ref{
Name: "creds-1",
Kind: "Secret",
},
},
},
after: &anywherev1.SnowDatacenterConfig{
Spec: anywherev1.SnowDatacenterConfigSpec{
IdentityRef: anywherev1.Ref{
Name: "creds-1",
Kind: "Secret",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
cluster.SetSnowDatacenterIndentityRefDefault(tt.before)
g.Expect(tt.before).To(Equal(tt.after))
})
}
}
func TestValidateSnowMachineRefExistsError(t *testing.T) {
g := NewWithT(t)
c := &cluster.Config{
Cluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Namespace: "ns-1",
},
Spec: anywherev1.ClusterSpec{
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &anywherev1.Ref{
Name: "worker-not-exists",
Kind: "SnowMachineConfig",
},
},
},
},
},
SnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"worker-1": {
ObjectMeta: metav1.ObjectMeta{
Name: "worker-1",
},
},
},
}
g.Expect(cluster.ValidateConfig(c)).To(
MatchError(ContainSubstring("unable to find SnowMachineConfig worker-not-exists")),
)
}
func TestValidateSnowUnstackedEtcdWithDHCPError(t *testing.T) {
g := NewWithT(t)
c := &cluster.Config{
Cluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Namespace: "ns-1",
},
Spec: anywherev1.ClusterSpec{
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.SnowDatacenterKind,
},
ExternalEtcdConfiguration: &anywherev1.ExternalEtcdConfiguration{
MachineGroupRef: &anywherev1.Ref{
Name: "etcd-1",
},
},
},
},
SnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"etcd-1": {
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-1",
},
Spec: anywherev1.SnowMachineConfigSpec{
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: true,
Primary: true,
},
},
},
},
},
},
}
g.Expect(cluster.ValidateConfig(c)).To(
MatchError(ContainSubstring("creating unstacked etcd machine with DHCP is not supported for snow")),
)
}
func TestValidateSnowUnstackedEtcdMissIPPoolError(t *testing.T) {
g := NewWithT(t)
c := &cluster.Config{
Cluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Namespace: "ns-1",
},
Spec: anywherev1.ClusterSpec{
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.SnowDatacenterKind,
},
ExternalEtcdConfiguration: &anywherev1.ExternalEtcdConfiguration{
MachineGroupRef: &anywherev1.Ref{
Name: "etcd-1",
},
},
},
},
SnowMachineConfigs: map[string]*anywherev1.SnowMachineConfig{
"etcd-1": {
ObjectMeta: metav1.ObjectMeta{
Name: "etcd-1",
},
Spec: anywherev1.SnowMachineConfigSpec{
Network: anywherev1.SnowNetwork{
DirectNetworkInterfaces: []anywherev1.SnowDirectNetworkInterface{
{
Index: 1,
DHCP: false,
Primary: true,
},
},
},
},
},
},
}
g.Expect(cluster.ValidateConfig(c)).To(
MatchError(ContainSubstring("snow machine config ip pool must be specified when using static IP")),
)
}
| 601 |
eks-anywhere | aws | Go | package cluster
import (
"fmt"
"strings"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
eksav1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type Spec struct {
*Config
Bundles *v1alpha1.Bundles
VersionsBundle *VersionsBundle
eksdRelease *eksdv1alpha1.Release
OIDCConfig *eksav1alpha1.OIDCConfig
AWSIamConfig *eksav1alpha1.AWSIamConfig
ManagementCluster *types.Cluster // TODO(g-gaston): cleanup, this doesn't belong here
}
func (s *Spec) DeepCopy() *Spec {
return &Spec{
Config: s.Config.DeepCopy(),
OIDCConfig: s.OIDCConfig.DeepCopy(),
AWSIamConfig: s.AWSIamConfig.DeepCopy(),
VersionsBundle: &VersionsBundle{
VersionsBundle: s.VersionsBundle.VersionsBundle.DeepCopy(),
KubeDistro: s.VersionsBundle.KubeDistro.deepCopy(),
},
eksdRelease: s.eksdRelease.DeepCopy(),
Bundles: s.Bundles.DeepCopy(),
}
}
type VersionsBundle struct {
*v1alpha1.VersionsBundle
KubeDistro *KubeDistro
}
// EKSD represents an eks-d release.
type EKSD struct {
	// Channel is the minor Kubernetes version for the eks-d release (e.g. "1.23", "1.24", etc.)
Channel string
// Number is the monotonically increasing number that distinguishes the different eks-d releases
// for the same Kubernetes minor version (channel).
Number int
}
type KubeDistro struct {
EKSD EKSD
Kubernetes VersionedRepository
CoreDNS VersionedRepository
Etcd VersionedRepository
NodeDriverRegistrar v1alpha1.Image
LivenessProbe v1alpha1.Image
ExternalAttacher v1alpha1.Image
ExternalProvisioner v1alpha1.Image
Pause v1alpha1.Image
EtcdImage v1alpha1.Image
EtcdVersion string
AwsIamAuthImage v1alpha1.Image
KubeProxy v1alpha1.Image
}
func (k *KubeDistro) deepCopy() *KubeDistro {
k2 := *k
return &k2
}
type VersionedRepository struct {
Repository, Tag string
}
// NewSpec builds a new [Spec].
func NewSpec(config *Config, bundles *v1alpha1.Bundles, eksdRelease *eksdv1alpha1.Release) (*Spec, error) {
s := &Spec{}
versionsBundle, err := GetVersionsBundle(config.Cluster, bundles)
if err != nil {
return nil, err
}
kubeDistro, err := buildKubeDistro(eksdRelease)
if err != nil {
return nil, err
}
s.Bundles = bundles
s.Config = config
s.VersionsBundle = &VersionsBundle{
VersionsBundle: versionsBundle,
KubeDistro: kubeDistro,
}
s.eksdRelease = eksdRelease
	// Get the first AWS IAM config if it exists.
	// Config supports multiple configs because Cluster references a slice,
	// but we validate that only one of each type is referenced.
for _, ac := range s.Config.AWSIAMConfigs {
s.AWSIamConfig = ac
break
}
	// Get the first OIDC config if it exists.
for _, oc := range s.Config.OIDCConfigs {
s.OIDCConfig = oc
break
}
return s, nil
}
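// Illustrative usage sketch (not part of the original source), assuming the caller already
// holds a parsed Config, a Bundles manifest and the matching eks-d Release:
//
//	spec, err := NewSpec(config, bundles, eksdRelease)
//	if err != nil {
//		return nil, err
//	}
//	// spec.VersionsBundle now pairs the bundle for the cluster's Kubernetes version
//	// with the eks-d KubeDistro images.
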
func (s *Spec) KubeDistroImages() []v1alpha1.Image {
images := []v1alpha1.Image{}
for _, component := range s.eksdRelease.Status.Components {
for _, asset := range component.Assets {
if asset.Image != nil {
images = append(images, v1alpha1.Image{URI: asset.Image.URI})
}
}
}
return images
}
func buildKubeDistro(eksd *eksdv1alpha1.Release) (*KubeDistro, error) {
kubeDistro := &KubeDistro{
EKSD: EKSD{
Channel: eksd.Spec.Channel,
Number: eksd.Spec.Number,
},
}
assets := make(map[string]*eksdv1alpha1.AssetImage)
for _, component := range eksd.Status.Components {
for _, asset := range component.Assets {
if asset.Image != nil {
assets[asset.Name] = asset.Image
}
}
if component.Name == "etcd" {
kubeDistro.EtcdVersion = strings.TrimPrefix(component.GitTag, "v")
}
}
kubeDistroComponents := map[string]*v1alpha1.Image{
"node-driver-registrar-image": &kubeDistro.NodeDriverRegistrar,
"livenessprobe-image": &kubeDistro.LivenessProbe,
"external-attacher-image": &kubeDistro.ExternalAttacher,
"external-provisioner-image": &kubeDistro.ExternalProvisioner,
"pause-image": &kubeDistro.Pause,
"etcd-image": &kubeDistro.EtcdImage,
"aws-iam-authenticator-image": &kubeDistro.AwsIamAuthImage,
"kube-proxy-image": &kubeDistro.KubeProxy,
}
for assetName, image := range kubeDistroComponents {
i := assets[assetName]
if i == nil {
return nil, fmt.Errorf("asset %s is no present in eksd release %s", assetName, eksd.Spec.Channel)
}
image.URI = i.URI
}
kubeDistroRepositories := map[string]*VersionedRepository{
"coredns-image": &kubeDistro.CoreDNS,
"etcd-image": &kubeDistro.Etcd,
"kube-apiserver-image": &kubeDistro.Kubernetes,
}
for assetName, image := range kubeDistroRepositories {
i := assets[assetName]
if i == nil {
return nil, fmt.Errorf("asset %s is not present in eksd release %s", assetName, eksd.Spec.Channel)
}
image.Repository, image.Tag = kubeDistroRepository(i)
}
return kubeDistro, nil
}
func kubeDistroRepository(image *eksdv1alpha1.AssetImage) (repo, tag string) {
i := v1alpha1.Image{
URI: image.URI,
}
lastInd := strings.LastIndex(i.Image(), "/")
if lastInd == -1 {
return i.Image(), i.Tag()
}
return i.Image()[:lastInd], i.Tag()
}
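// Illustrative sketch (not part of the original source): a typical eks-d image URI is
// split into its repository and tag.
//
//	repo, tag := kubeDistroRepository(&eksdv1alpha1.AssetImage{URI: "public.ecr.aws/eks-distro/etcd-io/etcd:v3.4.14-eks-1-19-4"})
//	// repo == "public.ecr.aws/eks-distro/etcd-io", tag == "v3.4.14-eks-1-19-4"
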
func (vb *VersionsBundle) KubeDistroImages() []v1alpha1.Image {
var images []v1alpha1.Image
images = append(images, vb.KubeDistro.EtcdImage)
images = append(images, vb.KubeDistro.ExternalAttacher)
images = append(images, vb.KubeDistro.ExternalProvisioner)
images = append(images, vb.KubeDistro.LivenessProbe)
images = append(images, vb.KubeDistro.NodeDriverRegistrar)
images = append(images, vb.KubeDistro.Pause)
return images
}
func (vb *VersionsBundle) Images() []v1alpha1.Image {
var images []v1alpha1.Image
images = append(images, vb.VersionsBundle.Images()...)
images = append(images, vb.KubeDistroImages()...)
return images
}
func (vb *VersionsBundle) Ovas() []v1alpha1.Archive {
return vb.VersionsBundle.Ovas()
}
func BundlesRefDefaulter() Defaulter {
return func(c *Config) error {
if c.Cluster.Spec.BundlesRef == nil {
c.Cluster.Spec.BundlesRef = &eksav1alpha1.BundlesRef{}
}
return nil
}
}
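// Illustrative usage sketch (not part of the original source):
//
//	defaulter := BundlesRefDefaulter()
//	if err := defaulter(config); err != nil {
//		return err
//	}
//	// config.Cluster.Spec.BundlesRef is now non-nil; an existing ref is left untouched.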
| 230 |
eks-anywhere | aws | Go | package cluster_test
import (
"embed"
"testing"
eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/manifests/eksd"
releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
//go:embed testdata
var testdataFS embed.FS
func TestNewSpecError(t *testing.T) {
tests := []struct {
name string
config *cluster.Config
bundles *releasev1.Bundles
eksdRelease *eksdv1.Release
error string
}{
{
name: "no VersionsBundle for kube version",
config: &cluster.Config{
Cluster: &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
KubernetesVersion: anywherev1.Kube124,
},
},
},
bundles: &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
Number: 2,
},
},
eksdRelease: &eksdv1.Release{},
error: "kubernetes version 1.24 is not supported by bundles manifest 2",
},
{
name: "invalid eks-d release",
config: &cluster.Config{
Cluster: &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
KubernetesVersion: anywherev1.Kube124,
},
},
},
bundles: &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
Number: 2,
VersionsBundles: []releasev1.VersionsBundle{
{
KubeVersion: "1.24",
},
},
},
},
eksdRelease: &eksdv1.Release{},
			error: "is not present in eksd release",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(cluster.NewSpec(tt.config, tt.bundles, tt.eksdRelease)).Error().To(
MatchError(ContainSubstring(tt.error)),
)
})
}
}
func TestNewSpecValid(t *testing.T) {
g := NewWithT(t)
config := &cluster.Config{
Cluster: &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
KubernetesVersion: anywherev1.Kube124,
},
},
OIDCConfigs: map[string]*anywherev1.OIDCConfig{
"myconfig": {},
},
AWSIAMConfigs: map[string]*anywherev1.AWSIamConfig{
"myconfig": {},
},
}
bundles := &releasev1.Bundles{
Spec: releasev1.BundlesSpec{
Number: 2,
VersionsBundles: []releasev1.VersionsBundle{
{
KubeVersion: "1.24",
},
},
},
}
eksdRelease := readEksdRelease(t, "testdata/eksd_valid.yaml")
spec, err := cluster.NewSpec(config, bundles, eksdRelease)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(spec.AWSIamConfig).NotTo(BeNil())
g.Expect(spec.OIDCConfig).NotTo(BeNil())
}
func TestSpecDeepCopy(t *testing.T) {
g := NewWithT(t)
r := files.NewReader()
yaml, err := r.ReadFile("testdata/docker_cluster_oidc_awsiam_flux.yaml")
g.Expect(err).To(Succeed())
config, err := cluster.ParseConfig(yaml)
g.Expect(err).To(Succeed())
bundles := test.Bundles(t)
eksd := test.EksdRelease()
spec, err := cluster.NewSpec(config, bundles, eksd)
g.Expect(err).To(Succeed())
g.Expect(spec.DeepCopy()).To(Equal(spec))
}
func TestBundlesRefDefaulter(t *testing.T) {
tests := []struct {
name string
bundles *releasev1.Bundles
config, want *cluster.Config
}{
{
name: "no bundles ref",
bundles: &releasev1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: "bundles-1",
Namespace: "eksa-system",
},
},
config: &cluster.Config{
Cluster: &anywherev1.Cluster{},
},
want: &cluster.Config{
Cluster: &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{},
},
},
},
},
{
name: "with previous bundles ref",
bundles: &releasev1.Bundles{
ObjectMeta: metav1.ObjectMeta{
Name: "bundles-1",
},
},
config: &cluster.Config{
Cluster: &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "bundles-2",
Namespace: "default",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
},
},
},
want: &cluster.Config{
Cluster: &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "bundles-2",
Namespace: "default",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
defaulter := cluster.BundlesRefDefaulter()
g.Expect(defaulter(tt.config)).To(Succeed())
g.Expect(tt.config).To(Equal(tt.want))
})
}
}
func validateSpecFromSimpleBundle(t *testing.T, gotSpec *cluster.Spec) {
validateVersionedRepo(t, gotSpec.VersionsBundle.KubeDistro.Kubernetes, "public.ecr.aws/eks-distro/kubernetes", "v1.19.8-eks-1-19-4")
validateVersionedRepo(t, gotSpec.VersionsBundle.KubeDistro.CoreDNS, "public.ecr.aws/eks-distro/coredns", "v1.8.0-eks-1-19-4")
validateVersionedRepo(t, gotSpec.VersionsBundle.KubeDistro.Etcd, "public.ecr.aws/eks-distro/etcd-io", "v3.4.14-eks-1-19-4")
validateImageURI(t, gotSpec.VersionsBundle.KubeDistro.NodeDriverRegistrar, "public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-19-4")
validateImageURI(t, gotSpec.VersionsBundle.KubeDistro.LivenessProbe, "public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4")
validateImageURI(t, gotSpec.VersionsBundle.KubeDistro.ExternalAttacher, "public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v3.1.0-eks-1-19-4")
validateImageURI(t, gotSpec.VersionsBundle.KubeDistro.ExternalProvisioner, "public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v2.1.1-eks-1-19-4")
validateImageURI(t, gotSpec.VersionsBundle.KubeDistro.EtcdImage, "public.ecr.aws/eks-distro/etcd-io/etcd:v3.4.14-eks-1-19-4")
validateImageURI(t, gotSpec.VersionsBundle.KubeDistro.KubeProxy, "public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.19.8-eks-1-19-4")
if gotSpec.VersionsBundle.KubeDistro.EtcdVersion != "3.4.14" {
t.Errorf("GetNewSpec() = Spec: Invalid etcd version, got %s, want 3.4.14", gotSpec.VersionsBundle.KubeDistro.EtcdVersion)
}
}
func validateImageURI(t *testing.T, gotImage releasev1.Image, wantURI string) {
if gotImage.URI != wantURI {
t.Errorf("GetNewSpec() = Spec: Invalid kubernetes URI, got %s, want %s", gotImage.URI, wantURI)
}
}
func validateVersionedRepo(t *testing.T, gotImage cluster.VersionedRepository, wantRepo, wantTag string) {
if gotImage.Repository != wantRepo {
t.Errorf("GetNewSpec() = Spec: Invalid kubernetes repo, got %s, want %s", gotImage.Repository, wantRepo)
}
if gotImage.Tag != wantTag {
t.Errorf("GetNewSpec() = Spec: Invalid kubernetes repo, got %s, want %s", gotImage.Tag, wantTag)
}
}
func readEksdRelease(tb testing.TB, url string) *eksdv1.Release {
tb.Helper()
r := files.NewReader()
release, err := eksd.ReadManifest(r, url)
if err != nil {
tb.Fatalf("Failed reading eks-d manifest: %s", err)
}
return release
}
| 235 |
eks-anywhere | aws | Go | package cluster
import (
"context"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
// tinkerbellEntry is unimplemented. It's boilerplate to mute warnings that could confuse the customer until we
// get around to implementing it.
func tinkerbellEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.TinkerbellDatacenterKind: func() APIObject {
return &anywherev1.TinkerbellDatacenterConfig{}
},
anywherev1.TinkerbellMachineConfigKind: func() APIObject {
return &anywherev1.TinkerbellMachineConfig{}
},
anywherev1.TinkerbellTemplateConfigKind: func() APIObject {
return &anywherev1.TinkerbellTemplateConfig{}
},
},
Processors: []ParsedProcessor{
processTinkerbellDatacenter,
machineConfigsProcessor(processTinkerbellMachineConfig),
processTinkerbellTemplateConfigs,
},
}
}
func processTinkerbellDatacenter(c *Config, objects ObjectLookup) {
if c.Cluster.Spec.DatacenterRef.Kind == anywherev1.TinkerbellDatacenterKind {
datacenter := objects.GetFromRef(c.Cluster.APIVersion, c.Cluster.Spec.DatacenterRef)
if datacenter != nil {
c.TinkerbellDatacenter = datacenter.(*anywherev1.TinkerbellDatacenterConfig)
}
}
}
func processTinkerbellTemplateConfigs(c *Config, objects ObjectLookup) {
if c.TinkerbellTemplateConfigs == nil {
c.TinkerbellTemplateConfigs = map[string]*anywherev1.TinkerbellTemplateConfig{}
}
if c.Cluster.Spec.DatacenterRef.Kind == anywherev1.TinkerbellDatacenterKind {
for _, o := range objects {
mt, ok := o.(*anywherev1.TinkerbellTemplateConfig)
if ok {
c.TinkerbellTemplateConfigs[mt.Name] = mt
}
}
}
}
func processTinkerbellMachineConfig(c *Config, objects ObjectLookup, machineRef *anywherev1.Ref) {
if machineRef == nil {
return
}
if machineRef.Kind != anywherev1.TinkerbellMachineConfigKind {
return
}
if c.TinkerbellMachineConfigs == nil {
c.TinkerbellMachineConfigs = map[string]*anywherev1.TinkerbellMachineConfig{}
}
m := objects.GetFromRef(c.Cluster.APIVersion, *machineRef)
if m == nil {
return
}
c.TinkerbellMachineConfigs[m.GetName()] = m.(*anywherev1.TinkerbellMachineConfig)
}
func getTinkerbellDatacenter(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.TinkerbellDatacenterKind {
return nil
}
datacenter := &anywherev1.TinkerbellDatacenterConfig{}
if err := client.Get(ctx, c.Cluster.Spec.DatacenterRef.Name, c.Cluster.Namespace, datacenter); err != nil {
return err
}
c.TinkerbellDatacenter = datacenter
return nil
}
func getTinkerbellMachineConfigs(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.TinkerbellDatacenterKind {
return nil
}
if c.TinkerbellMachineConfigs == nil {
c.TinkerbellMachineConfigs = map[string]*anywherev1.TinkerbellMachineConfig{}
}
for _, machineRef := range c.Cluster.MachineConfigRefs() {
if machineRef.Kind != anywherev1.TinkerbellMachineConfigKind {
continue
}
machineConfig := &anywherev1.TinkerbellMachineConfig{}
if err := client.Get(ctx, machineRef.Name, c.Cluster.Namespace, machineConfig); err != nil {
return err
}
c.TinkerbellMachineConfigs[machineConfig.Name] = machineConfig
}
return nil
}
| 113 |
eks-anywhere | aws | Go | package cluster_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1/thirdparty/tinkerbell"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestParseConfigFromFileTinkerbellCluster(t *testing.T) {
g := NewWithT(t)
got, err := cluster.ParseConfigFromFile("testdata/cluster_tinkerbell_1_19.yaml")
g.Expect(err).NotTo(HaveOccurred())
g.Expect(got.Cluster).To(BeComparableTo(
&anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "test-namespace",
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "1.19",
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &anywherev1.Endpoint{Host: "1.2.3.4"},
MachineGroupRef: &anywherev1.Ref{
Kind: "TinkerbellMachineConfig",
Name: "test-cp",
},
Taints: nil,
Labels: nil,
UpgradeRolloutStrategy: nil,
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
Count: ptr.Int(1),
MachineGroupRef: &anywherev1.Ref{
Kind: "TinkerbellMachineConfig",
Name: "test-md",
},
},
},
DatacenterRef: anywherev1.Ref{
Kind: "TinkerbellDatacenterConfig",
Name: "test",
},
IdentityProviderRefs: nil,
GitOpsRef: nil,
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{"192.168.0.0/16"},
},
Services: anywherev1.Services{
CidrBlocks: []string{"10.96.0.0/12"},
},
CNI: "cilium",
},
ExternalEtcdConfiguration: &anywherev1.ExternalEtcdConfiguration{
Count: 1,
MachineGroupRef: &anywherev1.Ref{
Kind: "TinkerbellMachineConfig",
Name: "test-cp",
},
},
ManagementCluster: anywherev1.ManagementCluster{Name: "test"},
},
},
))
g.Expect(got.TinkerbellDatacenter).To(BeComparableTo(
&anywherev1.TinkerbellDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellDatacenterConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: anywherev1.TinkerbellDatacenterConfigSpec{
TinkerbellIP: "1.2.3.4",
},
},
))
g.Expect(got.TinkerbellMachineConfigs).To(HaveLen(1))
g.Expect(got.TinkerbellMachineConfigs["test-cp"]).To(
BeComparableTo(
&anywherev1.TinkerbellMachineConfig{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellMachineConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cp",
Namespace: "test-namespace",
},
Spec: anywherev1.TinkerbellMachineConfigSpec{
TemplateRef: anywherev1.Ref{
Kind: "TinkerbellTemplateConfig",
Name: "tink-test",
},
OSFamily: "ubuntu",
Users: []anywherev1.UserConfiguration{
{
Name: "tink-user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3"},
},
},
},
},
),
)
g.Expect(got.TinkerbellTemplateConfigs).To(HaveLen(1))
g.Expect(got.TinkerbellTemplateConfigs["tink-test"]).To(
BeComparableTo(
&anywherev1.TinkerbellTemplateConfig{
TypeMeta: metav1.TypeMeta{
Kind: "TinkerbellTemplateConfig",
APIVersion: "anywhere.eks.amazonaws.com/v1alpha1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "tink-test",
},
Spec: anywherev1.TinkerbellTemplateConfigSpec{
Template: tinkerbell.Workflow{
Version: "0.1",
Name: "tink-test",
GlobalTimeout: 6000,
Tasks: []tinkerbell.Task{
{
Name: "tink-test",
WorkerAddr: "{{.device_1}}",
Actions: []tinkerbell.Action{
{
Name: "stream-image",
Image: "image2disk:v1.0.0",
Timeout: 360,
Environment: map[string]string{
"COMPRESSED": "true",
"DEST_DISK": "/dev/sda",
"IMG_URL": "",
},
},
},
Volumes: []string{
"/dev:/dev",
"/dev/console:/dev/console",
"/lib/firmware:/lib/firmware:ro",
},
},
},
},
},
},
),
)
}
func TestDefaultConfigClientBuilderTinkerbellCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.TinkerbellDatacenterKind,
Name: "datacenter",
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.TinkerbellMachineConfigKind,
Name: "machine-1",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.TinkerbellMachineConfigKind,
Name: "machine-2",
},
},
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.CloudStackMachineConfigKind, // Should not process this one
Name: "machine-3",
},
},
},
},
}
datacenter := &anywherev1.TinkerbellDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "datacenter",
Namespace: "default",
},
}
machineControlPlane := &anywherev1.TinkerbellMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-1",
Namespace: "default",
},
}
machineWorker := &anywherev1.TinkerbellMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-2",
Namespace: "default",
},
}
client.EXPECT().Get(ctx, "datacenter", "default", &anywherev1.TinkerbellDatacenterConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
d := obj.(*anywherev1.TinkerbellDatacenterConfig)
d.ObjectMeta = datacenter.ObjectMeta
d.Spec = datacenter.Spec
return nil
},
)
client.EXPECT().Get(ctx, "machine-1", "default", &anywherev1.TinkerbellMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.TinkerbellMachineConfig)
m.ObjectMeta = machineControlPlane.ObjectMeta
return nil
},
)
client.EXPECT().Get(ctx, "machine-2", "default", &anywherev1.TinkerbellMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.TinkerbellMachineConfig)
m.ObjectMeta = machineWorker.ObjectMeta
return nil
},
)
config, err := b.Build(ctx, client, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(config).NotTo(BeNil())
g.Expect(config.Cluster).To(Equal(cluster))
g.Expect(config.TinkerbellDatacenter).To(Equal(datacenter))
g.Expect(len(config.TinkerbellMachineConfigs)).To(Equal(2))
g.Expect(config.TinkerbellMachineConfigs["machine-1"]).To(Equal(machineControlPlane))
g.Expect(config.TinkerbellMachineConfigs["machine-2"]).To(Equal(machineWorker))
}
| 260 |
eks-anywhere | aws | Go | package cluster
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func ValidateConfig(c *Config) error {
return manager().Validate(c)
}
type namespaceObject interface {
runtime.Object
GetNamespace() string
}
func validateSameNamespace(c *Config, o namespaceObject) error {
if c.Cluster.Namespace != o.GetNamespace() {
return fmt.Errorf("%s and Cluster objects must have the same namespace specified", o.GetObjectKind().GroupVersionKind().Kind)
}
return nil
}
| 25 |
eks-anywhere | aws | Go | package cluster_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
func TestValidateConfig(t *testing.T) {
tests := []struct {
name string
		// Using testdata files here to avoid specifying structs in code that
		// we already have. If you need to test specific logic, create the structs
		// in this package to avoid testdata file explosion.
config *cluster.Config
}{
{
name: "vsphere cluster",
config: clusterConfigFromFile(t, "testdata/cluster_1_19.yaml"),
},
{
name: "docker cluster gitops",
config: clusterConfigFromFile(t, "testdata/docker_cluster_oidc_awsiam_gitops.yaml"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(cluster.ValidateConfig(tt.config)).To(Succeed())
})
}
}
func clusterConfigFromFile(t *testing.T, path string) *cluster.Config {
t.Helper()
c, err := cluster.ParseConfigFromFile(path)
if err != nil {
t.Fatal(err)
}
return c
}
func TestValidateConfigDifferentNamespace(t *testing.T) {
g := NewWithT(t)
c := &cluster.Config{
Cluster: &anywherev1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.ClusterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Namespace: "ns-1",
},
},
VSphereDatacenter: &anywherev1.VSphereDatacenterConfig{
TypeMeta: metav1.TypeMeta{
Kind: anywherev1.VSphereDatacenterKind,
APIVersion: anywherev1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
Namespace: "ns-2",
},
},
}
g.Expect(cluster.ValidateConfig(c)).To(
MatchError(ContainSubstring("VSphereDatacenterConfig and Cluster objects must have the same namespace specified")),
)
}
| 78 |
eks-anywhere | aws | Go | package cluster
import (
"context"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
func vsphereEntry() *ConfigManagerEntry {
return &ConfigManagerEntry{
APIObjectMapping: map[string]APIObjectGenerator{
anywherev1.VSphereDatacenterKind: func() APIObject {
return &anywherev1.VSphereDatacenterConfig{}
},
anywherev1.VSphereMachineConfigKind: func() APIObject {
return &anywherev1.VSphereMachineConfig{}
},
},
Processors: []ParsedProcessor{
processVSphereDatacenter,
machineConfigsProcessor(processVSphereMachineConfig),
},
Defaulters: []Defaulter{
func(c *Config) error {
if c.VSphereDatacenter != nil {
c.VSphereDatacenter.SetDefaults()
}
return nil
},
func(c *Config) error {
for _, m := range c.VSphereMachineConfigs {
m.SetDefaults()
m.SetUserDefaults()
}
return nil
},
},
Validations: []Validation{
func(c *Config) error {
if c.VSphereDatacenter != nil {
return c.VSphereDatacenter.Validate()
}
return nil
},
func(c *Config) error {
for _, m := range c.VSphereMachineConfigs {
if err := m.Validate(); err != nil {
return err
}
}
return nil
},
func(c *Config) error {
if c.VSphereDatacenter != nil {
if err := validateSameNamespace(c, c.VSphereDatacenter); err != nil {
return err
}
}
return nil
},
func(c *Config) error {
for _, v := range c.VSphereMachineConfigs {
if err := validateSameNamespace(c, v); err != nil {
return err
}
}
return nil
},
},
}
}
func processVSphereDatacenter(c *Config, objects ObjectLookup) {
if c.Cluster.Spec.DatacenterRef.Kind == anywherev1.VSphereDatacenterKind {
datacenter := objects.GetFromRef(c.Cluster.APIVersion, c.Cluster.Spec.DatacenterRef)
if datacenter != nil {
c.VSphereDatacenter = datacenter.(*anywherev1.VSphereDatacenterConfig)
}
}
}
func processVSphereMachineConfig(c *Config, objects ObjectLookup, machineRef *anywherev1.Ref) {
if machineRef == nil {
return
}
if machineRef.Kind != anywherev1.VSphereMachineConfigKind {
return
}
if c.VSphereMachineConfigs == nil {
c.VSphereMachineConfigs = map[string]*anywherev1.VSphereMachineConfig{}
}
m := objects.GetFromRef(c.Cluster.APIVersion, *machineRef)
if m == nil {
return
}
c.VSphereMachineConfigs[m.GetName()] = m.(*anywherev1.VSphereMachineConfig)
}
func getVSphereDatacenter(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.VSphereDatacenterKind {
return nil
}
datacenter := &anywherev1.VSphereDatacenterConfig{}
if err := client.Get(ctx, c.Cluster.Spec.DatacenterRef.Name, c.Cluster.Namespace, datacenter); err != nil {
return err
}
c.VSphereDatacenter = datacenter
return nil
}
func getVSphereMachineConfigs(ctx context.Context, client Client, c *Config) error {
if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.VSphereDatacenterKind {
return nil
}
if c.VSphereMachineConfigs == nil {
c.VSphereMachineConfigs = map[string]*anywherev1.VSphereMachineConfig{}
}
for _, machineRef := range c.Cluster.MachineConfigRefs() {
if machineRef.Kind != anywherev1.VSphereMachineConfigKind {
continue
}
machine := &anywherev1.VSphereMachineConfig{}
if err := client.Get(ctx, machineRef.Name, c.Cluster.Namespace, machine); err != nil {
return err
}
c.VSphereMachineConfigs[machine.Name] = machine
}
return nil
}
| 141 |
eks-anywhere | aws | Go | package cluster_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/cluster/mocks"
)
func TestParseConfigMissingVSphereDatacenter(t *testing.T) {
g := NewWithT(t)
got, err := cluster.ParseConfigFromFile("testdata/cluster_vsphere_missing_datacenter.yaml")
g.Expect(err).To(Not(HaveOccurred()))
g.Expect(got.VSphereDatacenter).To(BeNil())
}
func TestDefaultConfigClientBuilderVSphereCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
b := cluster.NewDefaultConfigClientBuilder()
ctrl := gomock.NewController(t)
client := mocks.NewMockClient(ctrl)
cluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "default",
},
Spec: anywherev1.ClusterSpec{
DatacenterRef: anywherev1.Ref{
Kind: anywherev1.VSphereDatacenterKind,
Name: "datacenter",
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-1",
},
},
WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.VSphereMachineConfigKind,
Name: "machine-2",
},
},
{
MachineGroupRef: &anywherev1.Ref{
Kind: anywherev1.CloudStackMachineConfigKind, // Should not process this one
Name: "machine-3",
},
},
},
},
}
datacenter := &anywherev1.VSphereDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "datacenter",
Namespace: "default",
},
}
machineControlPlane := &anywherev1.VSphereMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-1",
Namespace: "default",
},
}
machineWorker := &anywherev1.VSphereMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-2",
Namespace: "default",
},
}
client.EXPECT().Get(ctx, "datacenter", "default", &anywherev1.VSphereDatacenterConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
d := obj.(*anywherev1.VSphereDatacenterConfig)
d.ObjectMeta = datacenter.ObjectMeta
d.Spec = datacenter.Spec
return nil
},
)
client.EXPECT().Get(ctx, "machine-1", "default", &anywherev1.VSphereMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.VSphereMachineConfig)
m.ObjectMeta = machineControlPlane.ObjectMeta
return nil
},
)
client.EXPECT().Get(ctx, "machine-2", "default", &anywherev1.VSphereMachineConfig{}).Return(nil).DoAndReturn(
func(ctx context.Context, name, namespace string, obj runtime.Object) error {
m := obj.(*anywherev1.VSphereMachineConfig)
m.ObjectMeta = machineWorker.ObjectMeta
return nil
},
)
config, err := b.Build(ctx, client, cluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(config).NotTo(BeNil())
g.Expect(config.Cluster).To(Equal(cluster))
g.Expect(config.VSphereDatacenter).To(Equal(datacenter))
g.Expect(len(config.VSphereMachineConfigs)).To(Equal(2))
g.Expect(config.VSphereMachineConfigs["machine-1"]).To(Equal(machineControlPlane))
g.Expect(config.VSphereMachineConfigs["machine-2"]).To(Equal(machineWorker))
}
| 118 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/cluster/client_builder.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
gomock "github.com/golang/mock/gomock"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// Get mocks base method.
func (m *MockClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", ctx, name, namespace, obj)
ret0, _ := ret[0].(error)
return ret0
}
// Get indicates an expected call of Get.
func (mr *MockClientMockRecorder) Get(ctx, name, namespace, obj interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), ctx, name, namespace, obj)
}
| 51 |
eks-anywhere | aws | Go | package clusterapi
import (
"fmt"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/crypto"
)
const (
clusterKind = "Cluster"
kubeadmControlPlaneKind = "KubeadmControlPlane"
etcdadmClusterKind = "EtcdadmCluster"
kubeadmConfigTemplateKind = "KubeadmConfigTemplate"
machineDeploymentKind = "MachineDeployment"
EKSAClusterLabelName = "cluster.anywhere.eks.amazonaws.com/cluster-name"
EKSAClusterLabelNamespace = "cluster.anywhere.eks.amazonaws.com/cluster-namespace"
)
var (
clusterAPIVersion = clusterv1.GroupVersion.String()
kubeadmControlPlaneAPIVersion = controlplanev1.GroupVersion.String()
bootstrapAPIVersion = bootstrapv1.GroupVersion.String()
etcdAPIVersion = etcdv1.GroupVersion.String()
)
type APIObject interface {
runtime.Object
GetName() string
}
func InfrastructureAPIVersion() string {
return fmt.Sprintf("infrastructure.%s/%s", clusterv1.GroupVersion.Group, clusterv1.GroupVersion.Version)
}
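// Illustrative note (not part of the original source): with the CAPI v1beta1 group version
// this resolves to
//
//	InfrastructureAPIVersion() // "infrastructure.cluster.x-k8s.io/v1beta1"
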
func eksaClusterLabels(clusterSpec *cluster.Spec) map[string]string {
return map[string]string{
EKSAClusterLabelName: clusterSpec.Cluster.Name,
EKSAClusterLabelNamespace: clusterSpec.Cluster.Namespace,
}
}
func capiClusterLabel(clusterSpec *cluster.Spec) map[string]string {
return map[string]string{
clusterv1.ClusterNameLabel: ClusterName(clusterSpec.Cluster),
}
}
func capiObjectLabels(clusterSpec *cluster.Spec) map[string]string {
return mergeLabels(eksaClusterLabels(clusterSpec), capiClusterLabel(clusterSpec))
}
func mergeLabels(labels ...map[string]string) map[string]string {
size := 0
for _, l := range labels {
size += len(l)
}
merged := make(map[string]string, size)
for _, l := range labels {
for k, v := range l {
merged[k] = v
}
}
return merged
}
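// Illustrative sketch (not part of the original source): later maps win on key conflicts.
//
//	mergeLabels(map[string]string{"a": "1", "b": "1"}, map[string]string{"b": "2"})
//	// => map[string]string{"a": "1", "b": "2"}
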
// ClusterName generates the CAPI cluster name for an EKSA Cluster.
func ClusterName(cluster *anywherev1.Cluster) string {
return cluster.Name
}
// Cluster builds a CAPI Cluster based on an eks-a cluster spec, infrastructureObject, controlPlaneObject and unstackedEtcdObject.
func Cluster(clusterSpec *cluster.Spec, infrastructureObject, controlPlaneObject, unstackedEtcdObject APIObject) *clusterv1.Cluster {
clusterName := clusterSpec.Cluster.GetName()
cluster := &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterAPIVersion,
Kind: clusterKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: constants.EksaSystemNamespace,
Labels: capiObjectLabels(clusterSpec),
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks,
},
Services: &clusterv1.NetworkRanges{
CIDRBlocks: clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks,
},
},
ControlPlaneRef: &v1.ObjectReference{
APIVersion: controlPlaneObject.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Name: controlPlaneObject.GetName(),
Kind: controlPlaneObject.GetObjectKind().GroupVersionKind().Kind,
},
InfrastructureRef: &v1.ObjectReference{
APIVersion: infrastructureObject.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Name: infrastructureObject.GetName(),
Kind: infrastructureObject.GetObjectKind().GroupVersionKind().Kind,
},
},
}
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
setUnstackedEtcdConfigInCluster(cluster, unstackedEtcdObject)
}
return cluster
}
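// Illustrative usage sketch (not part of the original source); the provider-specific
// variables below are hypothetical and would come from the provider's own builders:
//
//	capiCluster := Cluster(clusterSpec, providerCluster, kubeadmControlPlane, etcdadmCluster)
//	// capiCluster.Spec.InfrastructureRef and ControlPlaneRef now point at the given objects;
//	// the external etcd ref is only set when the eks-a cluster configures unstacked etcd.
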
func KubeadmControlPlane(clusterSpec *cluster.Spec, infrastructureObject APIObject) (*controlplanev1.KubeadmControlPlane, error) {
replicas := int32(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count)
kcp := &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
APIVersion: kubeadmControlPlaneAPIVersion,
Kind: kubeadmControlPlaneKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: KubeadmControlPlaneName(clusterSpec.Cluster),
Namespace: constants.EksaSystemNamespace,
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
APIVersion: infrastructureObject.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Kind: infrastructureObject.GetObjectKind().GroupVersionKind().Kind,
Name: infrastructureObject.GetName(),
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: clusterSpec.VersionsBundle.KubeDistro.Kubernetes.Repository,
DNS: bootstrapv1.DNS{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: clusterSpec.VersionsBundle.KubeDistro.CoreDNS.Repository,
ImageTag: clusterSpec.VersionsBundle.KubeDistro.CoreDNS.Tag,
},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: ControllerManagerArgs(clusterSpec),
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: SecureTlsCipherSuitesExtraArgs().
Append(ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)),
Taints: clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints,
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: SecureTlsCipherSuitesExtraArgs().
Append(ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)),
Taints: clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints,
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{},
},
Replicas: &replicas,
Version: clusterSpec.VersionsBundle.KubeDistro.Kubernetes.Tag,
},
}
SetIdentityAuthInKubeadmControlPlane(kcp, clusterSpec)
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration == nil {
setStackedEtcdConfigInKubeadmControlPlane(kcp, clusterSpec.VersionsBundle.KubeDistro.Etcd)
}
return kcp, nil
}
func KubeadmConfigTemplate(clusterSpec *cluster.Spec, workerNodeGroupConfig anywherev1.WorkerNodeGroupConfiguration) (*bootstrapv1.KubeadmConfigTemplate, error) {
kct := &bootstrapv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: bootstrapAPIVersion,
Kind: kubeadmConfigTemplateKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: DefaultKubeadmConfigTemplateName(clusterSpec, workerNodeGroupConfig),
Namespace: constants.EksaSystemNamespace,
},
Spec: bootstrapv1.KubeadmConfigTemplateSpec{
Template: bootstrapv1.KubeadmConfigTemplateResource{
Spec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: WorkerNodeLabelsExtraArgs(workerNodeGroupConfig),
Taints: workerNodeGroupConfig.Taints,
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{},
},
},
},
}
return kct, nil
}
// MachineDeployment builds a machineDeployment based on an eks-a cluster spec, workerNodeGroupConfig, bootstrapObject and infrastructureObject.
func MachineDeployment(clusterSpec *cluster.Spec, workerNodeGroupConfig anywherev1.WorkerNodeGroupConfiguration, bootstrapObject, infrastructureObject APIObject) *clusterv1.MachineDeployment {
clusterName := clusterSpec.Cluster.GetName()
replicas := int32(*workerNodeGroupConfig.Count)
version := clusterSpec.VersionsBundle.KubeDistro.Kubernetes.Tag
md := &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterAPIVersion,
Kind: machineDeploymentKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: MachineDeploymentName(clusterSpec.Cluster, workerNodeGroupConfig),
Namespace: constants.EksaSystemNamespace,
Labels: capiObjectLabels(clusterSpec),
Annotations: map[string]string{},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: clusterName,
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Template: clusterv1.MachineTemplateSpec{
ObjectMeta: clusterv1.ObjectMeta{
Labels: capiClusterLabel(clusterSpec),
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
APIVersion: bootstrapObject.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Kind: bootstrapObject.GetObjectKind().GroupVersionKind().Kind,
Name: bootstrapObject.GetName(),
},
},
ClusterName: clusterName,
InfrastructureRef: v1.ObjectReference{
APIVersion: infrastructureObject.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Kind: infrastructureObject.GetObjectKind().GroupVersionKind().Kind,
Name: infrastructureObject.GetName(),
},
Version: &version,
},
},
Replicas: &replicas,
},
}
ConfigureAutoscalingInMachineDeployment(md, workerNodeGroupConfig.AutoScalingConfiguration)
return md
}
// EtcdadmCluster builds an etcdadmCluster based on an eks-a cluster spec and infrastructureTemplate.
func EtcdadmCluster(clusterSpec *cluster.Spec, infrastructureTemplate APIObject) *etcdv1.EtcdadmCluster {
replicas := int32(clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count)
etcd := &etcdv1.EtcdadmCluster{
TypeMeta: metav1.TypeMeta{
APIVersion: etcdAPIVersion,
Kind: etcdadmClusterKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: EtcdClusterName(clusterSpec.Cluster.GetName()),
Namespace: constants.EksaSystemNamespace,
},
Spec: etcdv1.EtcdadmClusterSpec{
Replicas: &replicas,
EtcdadmConfigSpec: etcdbootstrapv1.EtcdadmConfigSpec{
EtcdadmBuiltin: true,
CipherSuites: crypto.SecureCipherSuitesString(),
PreEtcdadmCommands: []string{},
},
InfrastructureTemplate: v1.ObjectReference{
APIVersion: infrastructureTemplate.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Kind: infrastructureTemplate.GetObjectKind().GroupVersionKind().Kind,
Name: infrastructureTemplate.GetName(),
},
},
}
setProxyConfigInEtcdCluster(etcd, clusterSpec.Cluster)
setRegistryMirrorInEtcdCluster(etcd, clusterSpec.Cluster.Spec.RegistryMirrorConfiguration)
return etcd
}
| 327 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type apiBuilerTest struct {
*WithT
clusterSpec *cluster.Spec
workerNodeGroupConfig *anywherev1.WorkerNodeGroupConfiguration
kubeadmConfigTemplate *bootstrapv1.KubeadmConfigTemplate
providerCluster clusterapi.APIObject
controlPlane clusterapi.APIObject
providerMachineTemplate clusterapi.APIObject
unstackedEtcdCluster clusterapi.APIObject
}
type providerCluster struct {
metav1.TypeMeta
metav1.ObjectMeta
}
func (c *providerCluster) DeepCopyObject() runtime.Object {
return nil
}
type providerMachineTemplate struct {
metav1.TypeMeta
metav1.ObjectMeta
}
func (c *providerMachineTemplate) DeepCopyObject() runtime.Object {
return nil
}
type unstackedEtcdCluster struct {
metav1.TypeMeta
metav1.ObjectMeta
}
func (c *unstackedEtcdCluster) DeepCopyObject() runtime.Object {
return nil
}
func newApiBuilerTest(t *testing.T) apiBuilerTest {
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
ClusterNetwork: anywherev1.ClusterNetwork{
Pods: anywherev1.Pods{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
Services: anywherev1.Services{
CidrBlocks: []string{
"1.2.3.4/5",
},
},
},
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Endpoint: &anywherev1.Endpoint{
Host: "1.2.3.4",
},
Count: 3,
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
Labels: map[string]string{
"key1": "val1",
"key2": "val2",
},
},
KubernetesVersion: "1.21",
},
}
s.VersionsBundle = &cluster.VersionsBundle{
KubeDistro: &cluster.KubeDistro{
Kubernetes: cluster.VersionedRepository{
Repository: "public.ecr.aws/eks-distro/kubernetes",
Tag: "v1.21.5-eks-1-21-9",
},
CoreDNS: cluster.VersionedRepository{
Repository: "public.ecr.aws/eks-distro/coredns",
Tag: "v1.8.4-eks-1-21-9",
},
Etcd: cluster.VersionedRepository{
Repository: "public.ecr.aws/eks-distro/etcd-io",
Tag: "v3.4.16-eks-1-21-9",
},
EtcdImage: v1alpha1.Image{
URI: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1",
},
Pause: v1alpha1.Image{
URI: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1",
},
},
VersionsBundle: &v1alpha1.VersionsBundle{
BottleRocketHostContainers: v1alpha1.BottlerocketHostContainersBundle{
Admin: v1alpha1.Image{
URI: "public.ecr.aws/eks-anywhere/bottlerocket-admin:0.0.1",
},
Control: v1alpha1.Image{
URI: "public.ecr.aws/eks-anywhere/bottlerocket-control:0.0.1",
},
KubeadmBootstrap: v1alpha1.Image{
URI: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1",
},
},
},
}
})
controlPlane := &controlplanev1.KubeadmControlPlane{
ObjectMeta: metav1.ObjectMeta{
Name: "cp-test",
},
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmControlPlane",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
}
workerNodeGroupConfig := &anywherev1.WorkerNodeGroupConfiguration{
Name: "wng-1",
Count: ptr.Int(3),
Taints: []v1.Taint{
{
Key: "key2",
Value: "val2",
Effect: v1.TaintEffectNoSchedule,
TimeAdded: nil,
},
},
Labels: map[string]string{
"key3": "val3",
},
}
kubeadmConfigTemplate := &bootstrapv1.KubeadmConfigTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "md-0",
},
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
}
providerCluster := &providerCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "provider-cluster",
},
TypeMeta: metav1.TypeMeta{
Kind: "ProviderCluster",
APIVersion: "providercluster.cluster.x-k8s.io/v1beta1",
},
}
providerMachineTemplate := &providerMachineTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "provider-template",
},
TypeMeta: metav1.TypeMeta{
Kind: "ProviderMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
}
unstackedEtcdCluster := &unstackedEtcdCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "unstacked-etcd-cluster",
},
TypeMeta: metav1.TypeMeta{
Kind: "UnstackedEtcdCluster",
APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1",
},
}
return apiBuilerTest{
WithT: NewWithT(t),
clusterSpec: clusterSpec,
workerNodeGroupConfig: workerNodeGroupConfig,
kubeadmConfigTemplate: kubeadmConfigTemplate,
providerCluster: providerCluster,
controlPlane: controlPlane,
providerMachineTemplate: providerMachineTemplate,
unstackedEtcdCluster: unstackedEtcdCluster,
}
}
func wantCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "Cluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "eksa-system",
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "test-cluster",
"cluster.anywhere.eks.amazonaws.com/cluster-name": "test-cluster",
"cluster.anywhere.eks.amazonaws.com/cluster-namespace": "my-namespace",
},
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: []string{
"1.2.3.4/5",
},
},
Services: &clusterv1.NetworkRanges{
CIDRBlocks: []string{
"1.2.3.4/5",
},
},
},
ControlPlaneRef: &v1.ObjectReference{
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmControlPlane",
Name: "cp-test",
},
InfrastructureRef: &v1.ObjectReference{
APIVersion: "providercluster.cluster.x-k8s.io/v1beta1",
Kind: "ProviderCluster",
Name: "provider-cluster",
},
},
}
}
func TestCluster(t *testing.T) {
tt := newApiBuilerTest(t)
got := clusterapi.Cluster(tt.clusterSpec, tt.providerCluster, tt.controlPlane, nil)
want := wantCluster()
tt.Expect(got).To(Equal(want))
}
func wantKubeadmControlPlane() *controlplanev1.KubeadmControlPlane {
replicas := int32(3)
return &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmControlPlane",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "eksa-system",
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
DNS: bootstrapv1.DNS{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/coredns",
ImageTag: "v1.8.4-eks-1-21-9",
},
},
Etcd: bootstrapv1.Etcd{
Local: &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/etcd-io",
ImageTag: "v3.4.16-eks-1-21-9",
},
ExtraArgs: map[string]string{
"cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: tlsCipherSuitesArgs(),
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{},
},
Replicas: &replicas,
Version: "v1.21.5-eks-1-21-9",
},
}
}
func TestKubeadmControlPlane(t *testing.T) {
tt := newApiBuilerTest(t)
got, err := clusterapi.KubeadmControlPlane(tt.clusterSpec, tt.providerMachineTemplate)
tt.Expect(err).To(Succeed())
want := wantKubeadmControlPlane()
tt.Expect(got).To(Equal(want))
}
func wantKubeadmConfigTemplate() *bootstrapv1.KubeadmConfigTemplate {
return &bootstrapv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmConfigTemplate",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-wng-1-1",
Namespace: "eksa-system",
},
Spec: bootstrapv1.KubeadmConfigTemplateSpec{
Template: bootstrapv1.KubeadmConfigTemplateResource{
Spec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"node-labels": "key3=val3",
},
Taints: []v1.Taint{
{
Key: "key2",
Value: "val2",
Effect: v1.TaintEffectNoSchedule,
TimeAdded: nil,
},
},
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{},
},
},
},
}
}
func TestKubeadmConfigTemplate(t *testing.T) {
tt := newApiBuilerTest(t)
got, err := clusterapi.KubeadmConfigTemplate(tt.clusterSpec, *tt.workerNodeGroupConfig)
tt.Expect(err).To(Succeed())
want := wantKubeadmConfigTemplate()
tt.Expect(got).To(Equal(want))
}
func wantMachineDeployment() *clusterv1.MachineDeployment {
replicas := int32(3)
version := "v1.21.5-eks-1-21-9"
return &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "MachineDeployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-wng-1",
Namespace: "eksa-system",
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "test-cluster",
"cluster.anywhere.eks.amazonaws.com/cluster-name": "test-cluster",
"cluster.anywhere.eks.amazonaws.com/cluster-namespace": "my-namespace",
},
Annotations: map[string]string{},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: "test-cluster",
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Template: clusterv1.MachineTemplateSpec{
ObjectMeta: clusterv1.ObjectMeta{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "test-cluster",
},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmConfigTemplate",
Name: "md-0",
},
},
ClusterName: "test-cluster",
InfrastructureRef: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
Version: &version,
},
},
Replicas: &replicas,
},
}
}
func TestMachineDeployment(t *testing.T) {
tt := newApiBuilerTest(t)
got := clusterapi.MachineDeployment(tt.clusterSpec, *tt.workerNodeGroupConfig, tt.kubeadmConfigTemplate, tt.providerMachineTemplate)
tt.Expect(got).To(BeComparableTo(wantMachineDeployment()))
}
func TestClusterName(t *testing.T) {
tests := []struct {
name string
cluster *anywherev1.Cluster
want string
}{
{
name: "no name",
cluster: &anywherev1.Cluster{},
want: "",
},
{
name: "with name",
cluster: &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
},
},
want: "my-cluster",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(clusterapi.ClusterName(tt.cluster)).To(Equal(tt.want))
})
}
}
func wantEtcdCluster() *etcdv1.EtcdadmCluster {
replicas := int32(3)
return &etcdv1.EtcdadmCluster{
TypeMeta: metav1.TypeMeta{
APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1",
Kind: "EtcdadmCluster",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-etcd",
Namespace: "eksa-system",
},
Spec: etcdv1.EtcdadmClusterSpec{
Replicas: &replicas,
EtcdadmConfigSpec: etcdbootstrapv1.EtcdadmConfigSpec{
EtcdadmBuiltin: true,
CipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
PreEtcdadmCommands: []string{},
},
InfrastructureTemplate: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
},
}
}
func TestEtcdadmCluster(t *testing.T) {
tt := newApiBuilerTest(t)
tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &anywherev1.ExternalEtcdConfiguration{
Count: 3,
}
got := clusterapi.EtcdadmCluster(tt.clusterSpec, tt.providerMachineTemplate)
want := wantEtcdCluster()
tt.Expect(got).To(Equal(want))
}
| 550 |
eks-anywhere | aws | Go | package clusterapi
import (
"strconv"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
const (
nodeGroupMinSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size"
nodeGroupMaxSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size"
)
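// ConfigureAutoscalingInMachineDeployment sets the cluster-autoscaler min and max node group
// size annotations on the MachineDeployment; it is a no-op when autoscalingConfig is nil.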
func ConfigureAutoscalingInMachineDeployment(md *clusterv1.MachineDeployment, autoscalingConfig *anywherev1.AutoScalingConfiguration) {
if autoscalingConfig == nil {
return
}
if md.ObjectMeta.Annotations == nil {
md.ObjectMeta.Annotations = map[string]string{}
}
md.ObjectMeta.Annotations[nodeGroupMinSizeAnnotation] = strconv.Itoa(autoscalingConfig.MinCount)
md.ObjectMeta.Annotations[nodeGroupMaxSizeAnnotation] = strconv.Itoa(autoscalingConfig.MaxCount)
}
| 28 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestConfigureAutoscalingInMachineDeployment(t *testing.T) {
replicas := int32(3)
version := "v1.21.5-eks-1-21-9"
tests := []struct {
name string
autoscalingConfig *v1alpha1.AutoScalingConfiguration
want *clusterv1.MachineDeployment
}{
{
name: "no autoscaling config",
autoscalingConfig: nil,
want: wantMachineDeployment(),
},
{
name: "with autoscaling config",
autoscalingConfig: &v1alpha1.AutoScalingConfiguration{
MinCount: 1,
MaxCount: 3,
},
want: &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "MachineDeployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-wng-1",
Namespace: "eksa-system",
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "test-cluster",
"cluster.anywhere.eks.amazonaws.com/cluster-name": "test-cluster",
"cluster.anywhere.eks.amazonaws.com/cluster-namespace": "my-namespace",
},
Annotations: map[string]string{
"cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size": "1",
"cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size": "3",
},
},
Spec: clusterv1.MachineDeploymentSpec{
ClusterName: "test-cluster",
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
Template: clusterv1.MachineTemplateSpec{
ObjectMeta: clusterv1.ObjectMeta{
Labels: map[string]string{
"cluster.x-k8s.io/cluster-name": "test-cluster",
},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmConfigTemplate",
Name: "md-0",
},
},
ClusterName: "test-cluster",
InfrastructureRef: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
Version: &version,
},
},
Replicas: &replicas,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantMachineDeployment()
clusterapi.ConfigureAutoscalingInMachineDeployment(got, tt.autoscalingConfig)
g.Expect(got).To(Equal(tt.want))
})
}
}
| 94 |
eks-anywhere | aws | Go | package clusterapi
import (
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
func bottlerocketBootstrap(image v1alpha1.Image) bootstrapv1.BottlerocketBootstrap {
return bootstrapv1.BottlerocketBootstrap{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: image.Image(),
ImageTag: image.Tag(),
},
}
}
func bottlerocketAdmin(image v1alpha1.Image) bootstrapv1.BottlerocketAdmin {
return bootstrapv1.BottlerocketAdmin{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: image.Image(),
ImageTag: image.Tag(),
},
}
}
func bottlerocketControl(image v1alpha1.Image) bootstrapv1.BottlerocketControl {
return bootstrapv1.BottlerocketControl{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: image.Image(),
ImageTag: image.Tag(),
},
}
}
func pause(image v1alpha1.Image) bootstrapv1.Pause {
return bootstrapv1.Pause{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: image.Image(),
ImageTag: image.Tag(),
},
}
}
func hostConfig(config *anywherev1.HostOSConfiguration) *bootstrapv1.BottlerocketSettings {
b := &bootstrapv1.BottlerocketSettings{}
if config.BottlerocketConfiguration.Kernel != nil {
b.Kernel = &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: config.BottlerocketConfiguration.Kernel.SysctlSettings,
}
}
if config.BottlerocketConfiguration.Boot != nil {
b.Boot = &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: config.BottlerocketConfiguration.Boot.BootKernelParameters,
}
}
return b
}
// SetBottlerocketInKubeadmControlPlane configures the kubeadmControlPlane for Bottlerocket: it sets the bootstrap format, the bootstrap and pause image metadata, the kubeconfig host path mounts for the controller manager and scheduler, and the kubeadm certificates directory.
func SetBottlerocketInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, versionsBundle *cluster.VersionsBundle) {
b := bottlerocketBootstrap(versionsBundle.BottleRocketHostContainers.KubeadmBootstrap)
p := pause(versionsBundle.KubeDistro.Pause)
kcp.Spec.KubeadmConfigSpec.Format = bootstrapv1.Bottlerocket
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketBootstrap = b
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Pause = p
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketBootstrap = b
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.Pause = p
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes = append(kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes,
bootstrapv1.HostPathMount{
HostPath: "/var/lib/kubeadm/controller-manager.conf",
MountPath: "/etc/kubernetes/controller-manager.conf",
Name: "kubeconfig",
PathType: "File",
ReadOnly: true,
},
)
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes = append(kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes,
bootstrapv1.HostPathMount{
HostPath: "/var/lib/kubeadm/scheduler.conf",
MountPath: "/etc/kubernetes/scheduler.conf",
Name: "kubeconfig",
PathType: "File",
ReadOnly: true,
},
)
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "/var/lib/kubeadm/pki"
}
// SetBottlerocketAdminContainerImageInKubeadmControlPlane overrides the default bottlerocket admin container image metadata in kubeadmControlPlane.
func SetBottlerocketAdminContainerImageInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, versionsBundle *cluster.VersionsBundle) {
b := bottlerocketAdmin(versionsBundle.BottleRocketHostContainers.Admin)
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketAdmin = b
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketAdmin = b
}
// SetBottlerocketControlContainerImageInKubeadmControlPlane overrides the default bottlerocket control container image metadata in kubeadmControlPlane.
func SetBottlerocketControlContainerImageInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, versionsBundle *cluster.VersionsBundle) {
b := bottlerocketControl(versionsBundle.BottleRocketHostContainers.Control)
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketControl = b
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketControl = b
}
// SetBottlerocketInKubeadmConfigTemplate adds bottlerocket bootstrap image metadata in kubeadmConfigTemplate.
func SetBottlerocketInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, versionsBundle *cluster.VersionsBundle) {
kct.Spec.Template.Spec.Format = bootstrapv1.Bottlerocket
kct.Spec.Template.Spec.JoinConfiguration.BottlerocketBootstrap = bottlerocketBootstrap(versionsBundle.BottleRocketHostContainers.KubeadmBootstrap)
kct.Spec.Template.Spec.JoinConfiguration.Pause = pause(versionsBundle.KubeDistro.Pause)
}
// SetBottlerocketAdminContainerImageInKubeadmConfigTemplate overrides the default bottlerocket admin container image metadata in kubeadmConfigTemplate.
func SetBottlerocketAdminContainerImageInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, versionsBundle *cluster.VersionsBundle) {
kct.Spec.Template.Spec.JoinConfiguration.BottlerocketAdmin = bottlerocketAdmin(versionsBundle.BottleRocketHostContainers.Admin)
}
// SetBottlerocketControlContainerImageInKubeadmConfigTemplate overrides the default bottlerocket control container image metadata in kubeadmConfigTemplate.
func SetBottlerocketControlContainerImageInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, versionsBundle *cluster.VersionsBundle) {
kct.Spec.Template.Spec.JoinConfiguration.BottlerocketControl = bottlerocketControl(versionsBundle.BottleRocketHostContainers.Control)
}
// SetBottlerocketHostConfigInKubeadmControlPlane sets Bottlerocket-specific host settings (kernel and boot) in kubeadmControlPlane.
func SetBottlerocketHostConfigInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, hostOSConfig *anywherev1.HostOSConfiguration) {
if hostOSConfig == nil || hostOSConfig.BottlerocketConfiguration == nil {
return
}
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Bottlerocket = hostConfig(hostOSConfig)
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.Bottlerocket = hostConfig(hostOSConfig)
}
// SetBottlerocketHostConfigInKubeadmConfigTemplate sets Bottlerocket-specific host settings (kernel and boot) in kubeadmConfigTemplate.
func SetBottlerocketHostConfigInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, hostOSConfig *anywherev1.HostOSConfiguration) {
if hostOSConfig == nil || hostOSConfig.BottlerocketConfiguration == nil {
return
}
kct.Spec.Template.Spec.JoinConfiguration.Bottlerocket = hostConfig(hostOSConfig)
}
// SetBottlerocketInEtcdCluster adds bottlerocket config in etcdadmCluster.
func SetBottlerocketInEtcdCluster(etcd *etcdv1.EtcdadmCluster, versionsBundle *cluster.VersionsBundle) {
etcd.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format(anywherev1.Bottlerocket)
etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{
EtcdImage: versionsBundle.KubeDistro.EtcdImage.VersionedImage(),
BootstrapImage: versionsBundle.BottleRocketHostContainers.KubeadmBootstrap.VersionedImage(),
PauseImage: versionsBundle.KubeDistro.Pause.VersionedImage(),
}
}
// SetBottlerocketAdminContainerImageInEtcdCluster overrides the default bottlerocket admin container image metadata in etcdadmCluster.
func SetBottlerocketAdminContainerImageInEtcdCluster(etcd *etcdv1.EtcdadmCluster, adminImage v1alpha1.Image) {
etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig.AdminImage = adminImage.VersionedImage()
}
// SetBottlerocketControlContainerImageInEtcdCluster overrides the default bottlerocket control container image metadata in etcdadmCluster.
func SetBottlerocketControlContainerImageInEtcdCluster(etcd *etcdv1.EtcdadmCluster, controlImage v1alpha1.Image) {
etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig.ControlImage = controlImage.VersionedImage()
}
// SetBottlerocketHostConfigInEtcdCluster sets Bottlerocket-specific host settings (kernel and boot) in etcdadmCluster.
func SetBottlerocketHostConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, hostOSConfig *anywherev1.HostOSConfiguration) {
if hostOSConfig == nil || hostOSConfig.BottlerocketConfiguration == nil {
return
}
if hostOSConfig.BottlerocketConfiguration.Kernel != nil {
etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig.Kernel = &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: hostOSConfig.BottlerocketConfiguration.Kernel.SysctlSettings,
}
}
if hostOSConfig.BottlerocketConfiguration.Boot != nil {
etcd.Spec.EtcdadmConfigSpec.BottlerocketConfig.Boot = &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: hostOSConfig.BottlerocketConfiguration.Boot.BootKernelParameters,
}
}
}
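// A minimal usage sketch for a Bottlerocket control plane (kcp, versionsBundle and hostOSConfig
// are assumed to come from the provider's generated objects and the cluster spec); the setters
// in this file are typically chained after the base KubeadmControlPlane is built:
//
//	SetBottlerocketInKubeadmControlPlane(kcp, versionsBundle)
//	SetBottlerocketAdminContainerImageInKubeadmControlPlane(kcp, versionsBundle)
//	SetBottlerocketControlContainerImageInKubeadmControlPlane(kcp, versionsBundle)
//	SetBottlerocketHostConfigInKubeadmControlPlane(kcp, hostOSConfig)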
| 184 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
. "github.com/onsi/gomega"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
var pause = bootstrapv1.Pause{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes/pause",
ImageTag: "0.0.1",
},
}
var bootstrap = bootstrapv1.BottlerocketBootstrap{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap",
ImageTag: "0.0.1",
},
}
var adminContainer = bootstrapv1.BottlerocketAdmin{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-anywhere/bottlerocket-admin",
ImageTag: "0.0.1",
},
}
var controlContainer = bootstrapv1.BottlerocketControl{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-anywhere/bottlerocket-control",
ImageTag: "0.0.1",
},
}
var kernel = &bootstrapv1.BottlerocketSettings{
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
}
func TestSetBottlerocketInKubeadmControlPlane(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
want := got.DeepCopy()
want.Spec.KubeadmConfigSpec.Format = "bottlerocket"
want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketBootstrap = bootstrap
want.Spec.KubeadmConfigSpec.ClusterConfiguration.Pause = pause
want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketBootstrap = bootstrap
want.Spec.KubeadmConfigSpec.JoinConfiguration.Pause = pause
want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraVolumes,
bootstrapv1.HostPathMount{
HostPath: "/var/lib/kubeadm/controller-manager.conf",
MountPath: "/etc/kubernetes/controller-manager.conf",
Name: "kubeconfig",
PathType: "File",
ReadOnly: true,
},
)
want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes = append(want.Spec.KubeadmConfigSpec.ClusterConfiguration.Scheduler.ExtraVolumes,
bootstrapv1.HostPathMount{
HostPath: "/var/lib/kubeadm/scheduler.conf",
MountPath: "/etc/kubernetes/scheduler.conf",
Name: "kubeconfig",
PathType: "File",
ReadOnly: true,
},
)
want.Spec.KubeadmConfigSpec.ClusterConfiguration.CertificatesDir = "/var/lib/kubeadm/pki"
clusterapi.SetBottlerocketInKubeadmControlPlane(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketAdminContainerImageInKubeadmControlPlane(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
want := got.DeepCopy()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketAdmin = adminContainer
want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketAdmin = adminContainer
clusterapi.SetBottlerocketAdminContainerImageInKubeadmControlPlane(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketControlContainerImageInKubeadmControlPlane(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
want := got.DeepCopy()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.BottlerocketControl = controlContainer
want.Spec.KubeadmConfigSpec.JoinConfiguration.BottlerocketControl = controlContainer
clusterapi.SetBottlerocketControlContainerImageInKubeadmControlPlane(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketInKubeadmConfigTemplate(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
want := got.DeepCopy()
want.Spec.Template.Spec.Format = "bottlerocket"
want.Spec.Template.Spec.JoinConfiguration.BottlerocketBootstrap = bootstrap
want.Spec.Template.Spec.JoinConfiguration.Pause = pause
clusterapi.SetBottlerocketInKubeadmConfigTemplate(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketAdminContainerImageInKubeadmConfigTemplate(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
want := got.DeepCopy()
want.Spec.Template.Spec.JoinConfiguration.BottlerocketAdmin = adminContainer
clusterapi.SetBottlerocketAdminContainerImageInKubeadmConfigTemplate(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketControlContainerImageInKubeadmConfigTemplate(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
want := got.DeepCopy()
want.Spec.Template.Spec.JoinConfiguration.BottlerocketControl = controlContainer
clusterapi.SetBottlerocketControlContainerImageInKubeadmConfigTemplate(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketInEtcdCluster(t *testing.T) {
g := newApiBuilerTest(t)
got := wantEtcdCluster()
want := got.DeepCopy()
want.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("bottlerocket")
want.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{
EtcdImage: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1",
BootstrapImage: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1",
PauseImage: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1",
}
clusterapi.SetBottlerocketInEtcdCluster(got, g.clusterSpec.VersionsBundle)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketAdminContainerImageInEtcdCluster(t *testing.T) {
g := newApiBuilerTest(t)
got := wantEtcdCluster()
got.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{
EtcdImage: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1",
BootstrapImage: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1",
PauseImage: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1",
}
want := got.DeepCopy()
want.Spec.EtcdadmConfigSpec.BottlerocketConfig.AdminImage = "public.ecr.aws/eks-anywhere/bottlerocket-admin:0.0.1"
clusterapi.SetBottlerocketAdminContainerImageInEtcdCluster(got, g.clusterSpec.VersionsBundle.BottleRocketHostContainers.Admin)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketControlContainerImageInEtcdCluster(t *testing.T) {
g := newApiBuilerTest(t)
got := wantEtcdCluster()
got.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{
EtcdImage: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1",
BootstrapImage: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1",
PauseImage: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1",
}
want := got.DeepCopy()
want.Spec.EtcdadmConfigSpec.BottlerocketConfig.ControlImage = "public.ecr.aws/eks-anywhere/bottlerocket-control:0.0.1"
clusterapi.SetBottlerocketControlContainerImageInEtcdCluster(got, g.clusterSpec.VersionsBundle.BottleRocketHostContainers.Control)
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketHostConfigInKubeadmControlPlane(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
want := got.DeepCopy()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.Bottlerocket = kernel
want.Spec.KubeadmConfigSpec.JoinConfiguration.Bottlerocket = kernel
clusterapi.SetBottlerocketHostConfigInKubeadmControlPlane(got, &anywherev1.HostOSConfiguration{
BottlerocketConfiguration: &anywherev1.BottlerocketConfiguration{
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
},
})
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketHostConfigInKubeadmConfigTemplate(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
want := got.DeepCopy()
want.Spec.Template.Spec.JoinConfiguration.Bottlerocket = kernel
clusterapi.SetBottlerocketHostConfigInKubeadmConfigTemplate(got, &anywherev1.HostOSConfiguration{
BottlerocketConfiguration: &anywherev1.BottlerocketConfiguration{
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
},
})
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketKernelSettingsInEtcdCluster(t *testing.T) {
g := newApiBuilerTest(t)
got := wantEtcdCluster()
got.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{
EtcdImage: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1",
BootstrapImage: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1",
PauseImage: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1",
}
want := got.DeepCopy()
want.Spec.EtcdadmConfigSpec.BottlerocketConfig.Kernel = &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
}
clusterapi.SetBottlerocketHostConfigInEtcdCluster(got, &anywherev1.HostOSConfiguration{
BottlerocketConfiguration: &anywherev1.BottlerocketConfiguration{
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
},
})
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketBootSettingsInKubeadmControlPlane(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
want := got.DeepCopy()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.Bottlerocket = &bootstrapv1.BottlerocketSettings{
Boot: &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
},
}
want.Spec.KubeadmConfigSpec.JoinConfiguration.Bottlerocket = &bootstrapv1.BottlerocketSettings{
Boot: &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
},
}
clusterapi.SetBottlerocketHostConfigInKubeadmControlPlane(got, &anywherev1.HostOSConfiguration{
BottlerocketConfiguration: &anywherev1.BottlerocketConfiguration{
Boot: &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
},
},
})
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketBootSettingsInKubeadmConfigTemplate(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
want := got.DeepCopy()
want.Spec.Template.Spec.JoinConfiguration.Bottlerocket = &bootstrapv1.BottlerocketSettings{
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
Boot: &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
},
}
clusterapi.SetBottlerocketHostConfigInKubeadmConfigTemplate(got, &anywherev1.HostOSConfiguration{
BottlerocketConfiguration: &anywherev1.BottlerocketConfiguration{
Boot: &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
},
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
},
})
g.Expect(got).To(Equal(want))
}
func TestSetBottlerocketBootSettingsInEtcdCluster(t *testing.T) {
g := newApiBuilerTest(t)
got := wantEtcdCluster()
got.Spec.EtcdadmConfigSpec.BottlerocketConfig = &etcdbootstrapv1.BottlerocketConfig{
EtcdImage: "public.ecr.aws/eks-distro/etcd-io/etcd:0.0.1",
BootstrapImage: "public.ecr.aws/eks-anywhere/bottlerocket-bootstrap:0.0.1",
PauseImage: "public.ecr.aws/eks-distro/kubernetes/pause:0.0.1",
}
want := got.DeepCopy()
want.Spec.EtcdadmConfigSpec.BottlerocketConfig.Kernel = &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
}
want.Spec.EtcdadmConfigSpec.BottlerocketConfig.Boot = &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
}
clusterapi.SetBottlerocketHostConfigInEtcdCluster(got, &anywherev1.HostOSConfiguration{
BottlerocketConfiguration: &anywherev1.BottlerocketConfiguration{
Boot: &bootstrapv1.BottlerocketBootSettings{
BootKernelParameters: map[string][]string{
"foo": {
"abc",
"def",
},
},
},
Kernel: &bootstrapv1.BottlerocketKernelSettings{
SysctlSettings: map[string]string{
"foo": "bar",
"abc": "def",
},
},
},
})
g.Expect(got).To(Equal(want))
}
| 375 |
eks-anywhere | aws | Go | package clusterapi
import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
const (
ControlPlaneReadyCondition clusterv1.ConditionType = "ControlPlaneReady"
)
| 8 |
eks-anywhere | aws | Go | package clusterapi
import (
"github.com/aws/eks-anywhere/pkg/cluster"
)
func ControllerManagerArgs(clusterSpec *cluster.Spec) ExtraArgs {
return SecureTlsCipherSuitesExtraArgs().
Append(NodeCIDRMaskExtraArgs(&clusterSpec.Cluster.Spec.ClusterNetwork))
}
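// For illustration, with a cluster spec that sets ClusterNetwork.Nodes.CIDRMaskSize to 28 this
// returns (values mirror the package's unit tests):
//
//	ExtraArgs{
//		"tls-cipher-suites":   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
//		"node-cidr-mask-size": "28",
//	}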
| 11 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestSetControllerManagerArgs(t *testing.T) {
tests := []struct {
name string
clusterSpec *cluster.Spec
want clusterapi.ExtraArgs
}{
{
name: "without Node CIDR mask",
clusterSpec: givenClusterSpec(),
want: map[string]string{"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
},
{
name: "with Node CIDR mask",
clusterSpec: givenClusterSpecWithNodeCIDR(),
want: map[string]string{"node-cidr-mask-size": "28", "tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := clusterapi.ControllerManagerArgs(tt.clusterSpec)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ControllerManagerArgs()/%s got = %v, want %v", tt.name, got, tt.want)
}
})
}
}
func givenClusterSpecWithNodeCIDR() *cluster.Spec {
cluster := givenClusterSpec()
nodeCidrMaskSize := new(int)
*nodeCidrMaskSize = 28
cluster.Cluster.Spec.ClusterNetwork = v1alpha1.ClusterNetwork{
Nodes: &v1alpha1.Nodes{CIDRMaskSize: nodeCidrMaskSize},
}
return cluster
}
func givenClusterSpec() *cluster.Spec {
return test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "snow-test",
Namespace: "test-namespace",
},
Spec: v1alpha1.ClusterSpec{
ClusterNetwork: v1alpha1.ClusterNetwork{
CNI: v1alpha1.Cilium,
Pods: v1alpha1.Pods{
CidrBlocks: []string{
"10.1.0.0/16",
},
},
Services: v1alpha1.Services{
CidrBlocks: []string{
"10.96.0.0/12",
},
},
},
},
}
})
}
func tlsCipherSuitesArgs() map[string]string {
return map[string]string{"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}
}
| 82 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)
// ControlPlane represents the provider-specific spec for a CAPI control plane using the kubeadm CP provider.
type ControlPlane[C Object[C], M Object[M]] struct {
Cluster *clusterv1.Cluster
// ProviderCluster is the provider-specific resource that holds the details
// for provisioning the infrastructure, referenced in Cluster.Spec.InfrastructureRef
ProviderCluster C
KubeadmControlPlane *controlplanev1.KubeadmControlPlane
// ControlPlaneMachineTemplate is the provider-specific machine template referenced
// in KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef
ControlPlaneMachineTemplate M
EtcdCluster *etcdv1.EtcdadmCluster
// EtcdMachineTemplate is the provider-specific machine template referenced
// in EtcdCluster.Spec.InfrastructureTemplate
EtcdMachineTemplate M
}
// Objects returns all API objects that form a concrete provider-specific control plane.
func (cp *ControlPlane[C, M]) Objects() []kubernetes.Object {
objs := make([]kubernetes.Object, 0, 4)
objs = append(objs, cp.Cluster, cp.KubeadmControlPlane, cp.ProviderCluster, cp.ControlPlaneMachineTemplate)
if cp.EtcdCluster != nil {
objs = append(objs, cp.EtcdCluster, cp.EtcdMachineTemplate)
}
return objs
}
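// A minimal instantiation sketch using the Docker provider types (mirroring the unit tests;
// the right-hand side values are assumed to be built elsewhere by the provider):
//
//	type dockerControlPlane = ControlPlane[*dockerv1.DockerCluster, *dockerv1.DockerMachineTemplate]
//	cp := &dockerControlPlane{
//		Cluster:                     capiCluster,
//		ProviderCluster:             dockerCluster,
//		KubeadmControlPlane:         kcp,
//		ControlPlaneMachineTemplate: machineTemplate,
//	}
//	objs := cp.Objects()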
// UpdateImmutableObjectNames checks if any of the control plane's immutable objects have changed by comparing the new definition
// with the current state of the cluster. If they have, it generates a new name for them by incrementing the monotonic number
// suffix at the end of the name.
// This is applied to all provider machine templates.
func (cp *ControlPlane[C, M]) UpdateImmutableObjectNames(
ctx context.Context,
client kubernetes.Client,
machineTemplateRetriever ObjectRetriever[M],
machineTemplateComparator ObjectComparator[M],
) error {
currentKCP := &controlplanev1.KubeadmControlPlane{}
err := client.Get(ctx, cp.KubeadmControlPlane.Name, cp.KubeadmControlPlane.Namespace, currentKCP)
if apierrors.IsNotFound(err) {
		// The KubeadmControlPlane doesn't exist yet, so this is a new cluster and machine templates should use their default names.
return nil
}
if err != nil {
return errors.Wrap(err, "reading current kubeadm control plane from API")
}
cp.ControlPlaneMachineTemplate.SetName(currentKCP.Spec.MachineTemplate.InfrastructureRef.Name)
if err = EnsureNewNameIfChanged(ctx, client, machineTemplateRetriever, machineTemplateComparator, cp.ControlPlaneMachineTemplate); err != nil {
return err
}
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = cp.ControlPlaneMachineTemplate.GetName()
if cp.EtcdCluster == nil {
return nil
}
currentEtcdCluster := &etcdv1.EtcdadmCluster{}
err = client.Get(ctx, cp.EtcdCluster.Name, cp.EtcdCluster.Namespace, currentEtcdCluster)
if apierrors.IsNotFound(err) {
		// The EtcdadmCluster doesn't exist yet, so this is a new cluster and machine templates should use their default names.
return nil
}
if err != nil {
return errors.Wrap(err, "reading current etcdadm cluster from API")
}
cp.EtcdMachineTemplate.SetName(currentEtcdCluster.Spec.InfrastructureTemplate.Name)
if err = EnsureNewNameIfChanged(ctx, client, machineTemplateRetriever, machineTemplateComparator, cp.EtcdMachineTemplate); err != nil {
return err
}
cp.EtcdCluster.Spec.InfrastructureTemplate.Name = cp.EtcdMachineTemplate.GetName()
return nil
}
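// A minimal usage sketch (cp is a previously built ControlPlane, client a kubernetes.Client, and
// retrieveMachineTemplate / machineTemplatesEqual are hypothetical provider-supplied
// ObjectRetriever and ObjectComparator implementations):
//
//	if err := cp.UpdateImmutableObjectNames(ctx, client, retrieveMachineTemplate, machineTemplatesEqual); err != nil {
//		return err
//	}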
| 95 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"testing"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
)
type dockerControlPlane = clusterapi.ControlPlane[*dockerv1.DockerCluster, *dockerv1.DockerMachineTemplate]
func TestControlPlaneObjects(t *testing.T) {
tests := []struct {
name string
controlPlane *dockerControlPlane
want []kubernetes.Object
}{
{
name: "stacked etcd",
controlPlane: &dockerControlPlane{
Cluster: capiCluster(),
ProviderCluster: dockerCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: dockerMachineTemplate(),
},
want: []kubernetes.Object{
capiCluster(),
dockerCluster(),
kubeadmControlPlane(),
dockerMachineTemplate(),
},
},
{
name: "unstacked etcd",
controlPlane: &dockerControlPlane{
Cluster: capiCluster(),
ProviderCluster: dockerCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: dockerMachineTemplate(),
EtcdCluster: etcdCluster(),
EtcdMachineTemplate: dockerMachineTemplate(),
},
want: []kubernetes.Object{
capiCluster(),
dockerCluster(),
kubeadmControlPlane(),
dockerMachineTemplate(),
etcdCluster(),
dockerMachineTemplate(),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.controlPlane.Objects()).To(ConsistOf(tt.want))
})
}
}
func TestControlPlaneUpdateImmutableObjectNamesNoKubeadmControlPlane(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
client := test.NewFakeKubeClient()
cp := controlPlaneStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
g.Expect(cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare)).To(Succeed())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal(originalCPMachineTemplateName))
}
func TestControlPlaneUpdateImmutableObjectNamesErrorReadingControlPlane(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneStackedEtcd()
client := test.NewFakeKubeClientAlwaysError()
g.Expect(
cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare),
).To(
MatchError(ContainSubstring("reading current kubeadm control plane from API")),
)
}
func TestControlPlaneUpdateImmutableObjectNamesErrorUpdatingName(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneStackedEtcd()
originalCPMachineTemplateName := "my-machine-template"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
g.Expect(
cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, withChangesCompare),
).To(
MatchError(ContainSubstring("incrementing name for DockerMachineTemplate eksa-system/my-machine-template")),
)
}
func TestControlPlaneUpdateImmutableObjectNamesSuccessStackedEtcdNoChanges(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
g.Expect(cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare)).To(Succeed())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal(originalCPMachineTemplateName))
g.Expect(cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name).To(Equal(cp.ControlPlaneMachineTemplate.Name))
}
func TestControlPlaneUpdateImmutableObjectNamesSuccessStackedEtcdWithChanges(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
g.Expect(cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, withChangesCompare)).To(Succeed())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("my-machine-template-2"))
g.Expect(cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name).To(Equal(cp.ControlPlaneMachineTemplate.Name))
}
func TestControlPlaneUpdateImmutableObjectNamesNoEtcdCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
cp.EtcdCluster = etcdCluster()
g.Expect(cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare)).To(Succeed())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal(originalCPMachineTemplateName))
g.Expect(cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name).To(Equal(cp.ControlPlaneMachineTemplate.Name))
}
func TestControlPlaneUpdateImmutableObjectNamesErrorReadingEtcdCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneUnStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
scheme := runtime.NewScheme()
g.Expect(controlplanev1.AddToScheme(scheme)).To(Succeed())
client := test.NewKubeClient(
fake.NewClientBuilder().WithScheme(scheme).WithObjects(cp.KubeadmControlPlane).Build(),
)
g.Expect(
cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare),
).To(
MatchError(ContainSubstring("reading current etcdadm cluster from API")),
)
}
func TestControlPlaneUpdateImmutableObjectNamesErrorUpdatingEtcdName(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneUnStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
originalEtcdMachineTemplateName := "my-etcd-machine-template"
cp.EtcdMachineTemplate.Name = originalEtcdMachineTemplateName
cp.EtcdCluster.Spec.InfrastructureTemplate.Name = originalEtcdMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
g.Expect(
cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, withChangesCompare),
).To(
MatchError(ContainSubstring("incrementing name for DockerMachineTemplate eksa-system/my-etcd-machine-template")),
)
}
func TestControlPlaneUpdateImmutableObjectNamesSuccessUnstackedEtcd(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneUnStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
originalEtcdMachineTemplateName := "my-etcd-machine-template-2"
cp.EtcdMachineTemplate.Name = originalEtcdMachineTemplateName
cp.EtcdCluster.Spec.InfrastructureTemplate.Name = originalEtcdMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
g.Expect(cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare)).To(Succeed())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal(originalCPMachineTemplateName))
g.Expect(cp.EtcdMachineTemplate.Name).To(Equal(originalEtcdMachineTemplateName))
g.Expect(cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name).To(Equal(cp.ControlPlaneMachineTemplate.Name))
g.Expect(cp.EtcdCluster.Spec.InfrastructureTemplate.Name).To(Equal(cp.EtcdMachineTemplate.Name))
}
func TestControlPlaneUpdateImmutableObjectNamesSuccessUnstackedEtcdWithChanges(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
cp := controlPlaneUnStackedEtcd()
originalCPMachineTemplateName := "my-machine-template-1"
cp.ControlPlaneMachineTemplate.Name = originalCPMachineTemplateName
cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name = originalCPMachineTemplateName
originalEtcdMachineTemplateName := "my-etcd-machine-template-2"
cp.EtcdMachineTemplate.Name = originalEtcdMachineTemplateName
cp.EtcdCluster.Spec.InfrastructureTemplate.Name = originalEtcdMachineTemplateName
client := test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(cp.Objects())...)
g.Expect(cp.UpdateImmutableObjectNames(ctx, client, dummyRetriever, withChangesCompare)).To(Succeed())
g.Expect(cp.ControlPlaneMachineTemplate.Name).To(Equal("my-machine-template-2"))
g.Expect(cp.EtcdMachineTemplate.Name).To(Equal("my-etcd-machine-template-3"))
g.Expect(cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name).To(Equal(cp.ControlPlaneMachineTemplate.Name))
g.Expect(cp.EtcdCluster.Spec.InfrastructureTemplate.Name).To(Equal(cp.EtcdMachineTemplate.Name))
}
func capiCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{}
}
func dockerCluster() *dockerv1.DockerCluster {
return &dockerv1.DockerCluster{}
}
func kubeadmControlPlane() *controlplanev1.KubeadmControlPlane {
return &controlplanev1.KubeadmControlPlane{}
}
func dockerMachineTemplate() *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "DockerMachineTemplate",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: constants.EksaSystemNamespace,
Name: "mt-1",
},
}
}
func etcdCluster() *etcdv1.EtcdadmCluster {
return &etcdv1.EtcdadmCluster{}
}
func controlPlaneStackedEtcd() *dockerControlPlane {
return &dockerControlPlane{
Cluster: capiCluster(),
ProviderCluster: dockerCluster(),
KubeadmControlPlane: kubeadmControlPlane(),
ControlPlaneMachineTemplate: dockerMachineTemplate(),
}
}
func controlPlaneUnStackedEtcd() *dockerControlPlane {
cp := controlPlaneStackedEtcd()
cp.EtcdCluster = etcdCluster()
cp.EtcdMachineTemplate = dockerMachineTemplate()
return cp
}
| 279 |
eks-anywhere | aws | Go | package clusterapi
import (
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
v1 "k8s.io/api/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
)
// SetUbuntuConfigInEtcdCluster sets the cloud-init format, etcd version and install dir used for Ubuntu in the EtcdadmCluster.
func SetUbuntuConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, version string) {
etcd.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("cloud-config")
etcd.Spec.EtcdadmConfigSpec.CloudInitConfig = &etcdbootstrapv1.CloudInitConfig{
Version: version,
InstallDir: "/usr/bin",
}
}
// setUnstackedEtcdConfigInCluster sets the managed external etcd reference in the CAPI Cluster.
func setUnstackedEtcdConfigInCluster(cluster *clusterv1.Cluster, unstackedEtcdObject APIObject) {
cluster.Spec.ManagedExternalEtcdRef = &v1.ObjectReference{
APIVersion: unstackedEtcdObject.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Kind: unstackedEtcdObject.GetObjectKind().GroupVersionKind().Kind,
Name: unstackedEtcdObject.GetName(),
Namespace: constants.EksaSystemNamespace,
}
}
// SetUnstackedEtcdConfigInKubeadmControlPlaneForBottlerocket sets up unstacked etcd configuration in kubeadmControlPlane for bottlerocket.
func SetUnstackedEtcdConfigInKubeadmControlPlaneForBottlerocket(kcp *controlplanev1.KubeadmControlPlane, externalEtcdConfig *v1alpha1.ExternalEtcdConfiguration) {
if externalEtcdConfig == nil {
return
}
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = &bootstrapv1.ExternalEtcd{
Endpoints: []string{},
CAFile: "/var/lib/kubeadm/pki/etcd/ca.crt",
CertFile: "/var/lib/kubeadm/pki/server-etcd-client.crt",
KeyFile: "/var/lib/kubeadm/pki/apiserver-etcd-client.key",
}
}
// SetUnstackedEtcdConfigInKubeadmControlPlaneForUbuntu sets up unstacked etcd configuration in kubeadmControlPlane for ubuntu.
func SetUnstackedEtcdConfigInKubeadmControlPlaneForUbuntu(kcp *controlplanev1.KubeadmControlPlane, externalEtcdConfig *v1alpha1.ExternalEtcdConfiguration) {
if externalEtcdConfig == nil {
return
}
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = &bootstrapv1.ExternalEtcd{
Endpoints: []string{},
CAFile: "/etc/kubernetes/pki/etcd/ca.crt",
CertFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt",
KeyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key",
}
}
// setStackedEtcdConfigInKubeadmControlPlane sets up stacked etcd configuration in kubeadmControlPlane.
func setStackedEtcdConfigInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, etcd cluster.VersionedRepository) {
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: etcd.Repository,
ImageTag: etcd.Tag,
},
ExtraArgs: SecureEtcdTlsCipherSuitesExtraArgs(),
}
}
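// A minimal usage sketch for an unstacked etcd cluster on Ubuntu (etcd, kcp and clusterSpec are
// assumed to be objects already built by the provider, and the version string is illustrative):
//
//	SetUbuntuConfigInEtcdCluster(etcd, "v3.4.16-eks-1-21-9")
//	SetUnstackedEtcdConfigInKubeadmControlPlaneForUbuntu(kcp, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration)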
| 73 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestSetUbuntuConfigInEtcdCluster(t *testing.T) {
g := newApiBuilerTest(t)
got := wantEtcdCluster()
v := "0.0.1"
want := got.DeepCopy()
want.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("cloud-config")
want.Spec.EtcdadmConfigSpec.CloudInitConfig = &etcdbootstrapv1.CloudInitConfig{
Version: v,
InstallDir: "/usr/bin",
}
clusterapi.SetUbuntuConfigInEtcdCluster(got, v)
g.Expect(got).To(Equal(want))
}
func TestClusterUnstackedEtcd(t *testing.T) {
tt := newApiBuilerTest(t)
tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &anywherev1.ExternalEtcdConfiguration{
Count: 3,
}
got := clusterapi.Cluster(tt.clusterSpec, tt.providerCluster, tt.controlPlane, tt.unstackedEtcdCluster)
want := wantCluster()
want.Spec.ManagedExternalEtcdRef = &v1.ObjectReference{
APIVersion: "etcdcluster.cluster.x-k8s.io/v1beta1",
Kind: "UnstackedEtcdCluster",
Name: "unstacked-etcd-cluster",
Namespace: "eksa-system",
}
tt.Expect(got).To(Equal(want))
}
func TestSetUnstackedEtcdConfigInKubeadmControlPlaneForBottlerocket(t *testing.T) {
tt := newApiBuilerTest(t)
etcdConfig := &anywherev1.ExternalEtcdConfiguration{
Count: 3,
}
got := wantKubeadmControlPlane()
got.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = &bootstrapv1.ExternalEtcd{
Endpoints: []string{},
CAFile: "/var/lib/kubeadm/pki/etcd/ca.crt",
CertFile: "/var/lib/kubeadm/pki/server-etcd-client.crt",
KeyFile: "/var/lib/kubeadm/pki/apiserver-etcd-client.key",
}
want := got.DeepCopy()
clusterapi.SetUnstackedEtcdConfigInKubeadmControlPlaneForBottlerocket(got, etcdConfig)
tt.Expect(got).To(Equal(want))
}
func TestSetUnstackedEtcdConfigInKubeadmControlPlaneForUbuntu(t *testing.T) {
tt := newApiBuilerTest(t)
etcdConfig := &anywherev1.ExternalEtcdConfiguration{
Count: 3,
}
got := wantKubeadmControlPlane()
got.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.External = &bootstrapv1.ExternalEtcd{
Endpoints: []string{},
CAFile: "/etc/kubernetes/pki/etcd/ca.crt",
CertFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt",
KeyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key",
}
want := got.DeepCopy()
clusterapi.SetUnstackedEtcdConfigInKubeadmControlPlaneForUbuntu(got, etcdConfig)
tt.Expect(got).To(Equal(want))
}
func TestSetStackedEtcdConfigInKubeadmControlPlane(t *testing.T) {
tt := newApiBuilerTest(t)
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd.Local = &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/etcd-io",
ImageTag: "v3.4.16-eks-1-21-9",
},
ExtraArgs: map[string]string{
"cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
}
got, err := clusterapi.KubeadmControlPlane(tt.clusterSpec, tt.providerMachineTemplate)
tt.Expect(err).To(Succeed())
tt.Expect(got).To(Equal(want))
}
| 95 |
eks-anywhere | aws | Go | package clusterapi
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/templater"
)
// ExtraArgs is a map of flag names to values used to build the extra args passed to Kubernetes components.
type ExtraArgs map[string]string
func OIDCToExtraArgs(oidc *v1alpha1.OIDCConfig) ExtraArgs {
args := ExtraArgs{}
if oidc == nil {
return args
}
args.AddIfNotEmpty("oidc-client-id", oidc.Spec.ClientId)
args.AddIfNotEmpty("oidc-groups-claim", oidc.Spec.GroupsClaim)
args.AddIfNotEmpty("oidc-groups-prefix", oidc.Spec.GroupsPrefix)
args.AddIfNotEmpty("oidc-issuer-url", oidc.Spec.IssuerUrl)
if len(oidc.Spec.RequiredClaims) > 0 {
args.AddIfNotEmpty("oidc-required-claim", requiredClaimToArg(&oidc.Spec.RequiredClaims[0]))
}
args.AddIfNotEmpty("oidc-username-claim", oidc.Spec.UsernameClaim)
args.AddIfNotEmpty("oidc-username-prefix", oidc.Spec.UsernamePrefix)
return args
}
func AwsIamAuthExtraArgs(awsiam *v1alpha1.AWSIamConfig) ExtraArgs {
args := ExtraArgs{}
if awsiam == nil {
return args
}
args.AddIfNotEmpty("authentication-token-webhook-config-file", "/etc/kubernetes/aws-iam-authenticator/kubeconfig.yaml")
return args
}
// FeatureGatesExtraArgs takes a list of feature gates with their values and returns them in the format expected by the feature-gates flag.
// Example: FeatureGatesExtraArgs("ServiceLoadBalancerClass=true").
func FeatureGatesExtraArgs(features ...string) ExtraArgs {
if len(features) == 0 {
return nil
}
return ExtraArgs{
"feature-gates": strings.Join(features[:], ","),
}
}
func PodIAMAuthExtraArgs(podIAMConfig *v1alpha1.PodIAMConfig) ExtraArgs {
if podIAMConfig == nil {
return nil
}
args := ExtraArgs{}
args.AddIfNotEmpty("service-account-issuer", podIAMConfig.ServiceAccountIssuer)
return args
}
func NodeCIDRMaskExtraArgs(clusterNetwork *v1alpha1.ClusterNetwork) ExtraArgs {
if clusterNetwork == nil || clusterNetwork.Nodes == nil || clusterNetwork.Nodes.CIDRMaskSize == nil {
return nil
}
args := ExtraArgs{}
args.AddIfNotEmpty("node-cidr-mask-size", strconv.Itoa(*clusterNetwork.Nodes.CIDRMaskSize))
return args
}
func ResolvConfExtraArgs(resolvConf *v1alpha1.ResolvConf) ExtraArgs {
if resolvConf == nil {
return nil
}
args := ExtraArgs{}
args.AddIfNotEmpty("resolv-conf", resolvConf.Path)
return args
}
// SecureTlsCipherSuitesExtraArgs returns the tls-cipher-suites extra arg for Kubernetes components.
// It won't be needed once the Kubernetes components default to using the secure cipher suites.
func SecureTlsCipherSuitesExtraArgs() ExtraArgs {
args := ExtraArgs{}
args.AddIfNotEmpty("tls-cipher-suites", crypto.SecureCipherSuitesString())
return args
}
func SecureEtcdTlsCipherSuitesExtraArgs() ExtraArgs {
args := ExtraArgs{}
args.AddIfNotEmpty("cipher-suites", crypto.SecureCipherSuitesString())
return args
}
func WorkerNodeLabelsExtraArgs(wnc v1alpha1.WorkerNodeGroupConfiguration) ExtraArgs {
return nodeLabelsExtraArgs(wnc.Labels)
}
func ControlPlaneNodeLabelsExtraArgs(cpc v1alpha1.ControlPlaneConfiguration) ExtraArgs {
return nodeLabelsExtraArgs(cpc.Labels)
}
// CgroupDriverCgroupfsExtraArgs returns the cgroup-driver args for kube versions below 1.24.
func CgroupDriverCgroupfsExtraArgs() ExtraArgs {
args := ExtraArgs{}
args.AddIfNotEmpty("cgroup-driver", "cgroupfs")
return args
}
// CgroupDriverSystemdExtraArgs returns the cgroup-driver args for kube versions 1.24 and above.
func CgroupDriverSystemdExtraArgs() ExtraArgs {
args := ExtraArgs{}
args.AddIfNotEmpty("cgroup-driver", "systemd")
return args
}
func nodeLabelsExtraArgs(labels map[string]string) ExtraArgs {
args := ExtraArgs{}
args.AddIfNotEmpty("node-labels", labelsMapToArg(labels))
return args
}
func (e ExtraArgs) AddIfNotEmpty(k, v string) {
if v != "" {
logger.V(5).Info("Adding extraArgs", k, v)
e[k] = v
}
}
func (e ExtraArgs) Append(args ExtraArgs) ExtraArgs {
for k, v := range args {
e[k] = v
}
return e
}
func (e ExtraArgs) ToPartialYaml() templater.PartialYaml {
p := templater.PartialYaml{}
for k, v := range e {
p.AddIfNotZero(k, v)
}
return p
}
func requiredClaimToArg(r *v1alpha1.OIDCConfigRequiredClaim) string {
if r == nil || r.Claim == "" {
return ""
}
return fmt.Sprintf("%s=%s", r.Claim, r.Value)
}
func labelsMapToArg(m map[string]string) string {
labels := make([]string, 0, len(m))
for k, v := range m {
labels = append(labels, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(labels)
labelStr := strings.Join(labels, ",")
return labelStr
}
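// exampleAPIServerExtraArgs is an illustrative sketch, not part of the original
// source. It shows how the helpers above compose: individual ExtraArgs maps are
// merged with Append and rendered with ToPartialYaml for templating. The OIDC
// config argument and the feature gate value are hypothetical.
func exampleAPIServerExtraArgs(oidc *v1alpha1.OIDCConfig) templater.PartialYaml {
	args := SecureTlsCipherSuitesExtraArgs().
		Append(OIDCToExtraArgs(oidc)).
		Append(FeatureGatesExtraArgs("ServiceLoadBalancerClass=true"))
	return args.ToPartialYaml()
}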
| 166 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"reflect"
"testing"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/crypto"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
func TestOIDCToExtraArgs(t *testing.T) {
tests := []struct {
testName string
oidc *v1alpha1.OIDCConfig
want clusterapi.ExtraArgs
}{
{
testName: "no oidc",
oidc: nil,
want: clusterapi.ExtraArgs{},
},
{
testName: "minimal oidc with zero values",
oidc: &v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "my-client-id",
IssuerUrl: "https://mydomain.com/issuer",
RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{{}},
GroupsClaim: "",
},
},
want: clusterapi.ExtraArgs{
"oidc-client-id": "my-client-id",
"oidc-issuer-url": "https://mydomain.com/issuer",
},
},
{
testName: "minimal oidc with nil values",
oidc: &v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "my-client-id",
IssuerUrl: "https://mydomain.com/issuer",
RequiredClaims: nil,
},
},
want: clusterapi.ExtraArgs{
"oidc-client-id": "my-client-id",
"oidc-issuer-url": "https://mydomain.com/issuer",
},
},
{
testName: "full oidc",
oidc: &v1alpha1.OIDCConfig{
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "my-client-id",
IssuerUrl: "https://mydomain.com/issuer",
GroupsClaim: "claim1",
GroupsPrefix: "prefix-for-groups",
RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{{
Claim: "sub",
Value: "test",
}},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
},
want: clusterapi.ExtraArgs{
"oidc-client-id": "my-client-id",
"oidc-groups-claim": "claim1",
"oidc-groups-prefix": "prefix-for-groups",
"oidc-issuer-url": "https://mydomain.com/issuer",
"oidc-required-claim": "sub=test",
"oidc-username-claim": "username-claim",
"oidc-username-prefix": "username-prefix",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.OIDCToExtraArgs(tt.oidc); !reflect.DeepEqual(got, tt.want) {
t.Errorf("OIDCToExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestExtraArgsAddIfNotEmpty(t *testing.T) {
tests := []struct {
testName string
e clusterapi.ExtraArgs
k string
v string
wantAdded bool
wantV string
}{
{
testName: "add string",
e: clusterapi.ExtraArgs{},
k: "key",
v: "value",
wantAdded: true,
wantV: "value",
},
{
testName: "add empty string",
e: clusterapi.ExtraArgs{},
k: "key",
v: "",
wantAdded: false,
wantV: "",
},
{
testName: "add present string",
e: clusterapi.ExtraArgs{
"key": "value_old",
},
k: "key",
v: "value_new",
wantAdded: true,
wantV: "value_new",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
tt.e.AddIfNotEmpty(tt.k, tt.v)
gotV, gotAdded := tt.e[tt.k]
if tt.wantAdded != gotAdded {
t.Errorf("ExtraArgs.AddIfNotZero() wasAdded = %v, wantAdded %v", gotAdded, tt.wantAdded)
}
if gotV != tt.wantV {
t.Errorf("ExtraArgs.AddIfNotZero() gotValue = %v, wantValue %v", gotV, tt.wantV)
}
})
}
}
func TestExtraArgsToPartialYaml(t *testing.T) {
tests := []struct {
testName string
e clusterapi.ExtraArgs
want templater.PartialYaml
}{
{
testName: "valid args",
e: clusterapi.ExtraArgs{
"oidc-client-id": "my-client-id",
"oidc-groups-claim": "claim1,claim2",
"oidc-groups-prefix": "prefix-for-groups",
"oidc-issuer-url": "https://mydomain.com/issuer",
"oidc-required-claim": "hd=example.com,sub=test",
"oidc-signing-algs": "ES256,HS256",
"oidc-username-claim": "username-claim",
"oidc-username-prefix": "username-prefix",
},
want: templater.PartialYaml{
"oidc-client-id": "my-client-id",
"oidc-groups-claim": "claim1,claim2",
"oidc-groups-prefix": "prefix-for-groups",
"oidc-issuer-url": "https://mydomain.com/issuer",
"oidc-required-claim": "hd=example.com,sub=test",
"oidc-signing-algs": "ES256,HS256",
"oidc-username-claim": "username-claim",
"oidc-username-prefix": "username-prefix",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := tt.e.ToPartialYaml(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("ExtraArgs.ToPartialYaml() = %v, want %v", got, tt.want)
}
})
}
}
func TestAwsIamAuthExtraArgs(t *testing.T) {
tests := []struct {
testName string
awsiam *v1alpha1.AWSIamConfig
want clusterapi.ExtraArgs
}{
{
testName: "no aws iam",
awsiam: nil,
want: clusterapi.ExtraArgs{},
},
{
testName: "with aws iam config",
awsiam: &v1alpha1.AWSIamConfig{},
want: clusterapi.ExtraArgs{
"authentication-token-webhook-config-file": "/etc/kubernetes/aws-iam-authenticator/kubeconfig.yaml",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.AwsIamAuthExtraArgs(tt.awsiam); !reflect.DeepEqual(got, tt.want) {
t.Errorf("AwsIamAuthExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestPodIAMConfigExtraArgs(t *testing.T) {
tests := []struct {
testName string
podIAM *v1alpha1.PodIAMConfig
want clusterapi.ExtraArgs
}{
{
testName: "no pod IAM config",
podIAM: nil,
want: nil,
},
{
testName: "with pod IAM config",
podIAM: &v1alpha1.PodIAMConfig{ServiceAccountIssuer: "https://test"},
want: clusterapi.ExtraArgs{
"service-account-issuer": "https://test",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.PodIAMAuthExtraArgs(tt.podIAM); !reflect.DeepEqual(got, tt.want) {
t.Errorf("PodIAMAuthExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestResolvConfExtraArgs(t *testing.T) {
tests := []struct {
testName string
resolvConf *v1alpha1.ResolvConf
want clusterapi.ExtraArgs
}{
{
testName: "default",
resolvConf: &v1alpha1.ResolvConf{Path: ""},
want: map[string]string{},
},
{
testName: "with custom resolvConf file",
resolvConf: &v1alpha1.ResolvConf{Path: "mypath"},
want: clusterapi.ExtraArgs{
"resolv-conf": "mypath",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.ResolvConfExtraArgs(tt.resolvConf); !reflect.DeepEqual(got, tt.want) {
t.Errorf("ResolvConfExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestSecureTlsCipherSuitesExtraArgs(t *testing.T) {
tests := []struct {
testName string
want clusterapi.ExtraArgs
}{
{
testName: "default",
want: clusterapi.ExtraArgs{
"tls-cipher-suites": crypto.SecureCipherSuitesString(),
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.SecureTlsCipherSuitesExtraArgs(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("SecureTlsCipherSuitesExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestSecureEtcdTlsCipherSuitesExtraArgs(t *testing.T) {
tests := []struct {
testName string
want clusterapi.ExtraArgs
}{
{
testName: "default",
want: clusterapi.ExtraArgs{
"cipher-suites": crypto.SecureCipherSuitesString(),
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("SecureEtcdTlsCipherSuitesExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestCgroupDriverCgroupfsExtraArgs(t *testing.T) {
tests := []struct {
testName string
want clusterapi.ExtraArgs
}{
{
testName: "default",
want: clusterapi.ExtraArgs{
"cgroup-driver": "cgroupfs",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.CgroupDriverCgroupfsExtraArgs(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("CgroupDriverCgroupfsExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestCgroupDriverSystemdExtraArgs(t *testing.T) {
tests := []struct {
testName string
want clusterapi.ExtraArgs
}{
{
testName: "default",
want: clusterapi.ExtraArgs{
"cgroup-driver": "systemd",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.CgroupDriverSystemdExtraArgs(); !reflect.DeepEqual(got, tt.want) {
t.Errorf("CgroupDriverSystemdExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeLabelsExtraArgs(t *testing.T) {
tests := []struct {
testName string
wnc v1alpha1.WorkerNodeGroupConfiguration
want clusterapi.ExtraArgs
}{
{
testName: "no labels",
wnc: v1alpha1.WorkerNodeGroupConfiguration{
Count: ptr.Int(3),
},
want: clusterapi.ExtraArgs{},
},
{
testName: "with labels",
wnc: v1alpha1.WorkerNodeGroupConfiguration{
Count: ptr.Int(3),
Labels: map[string]string{"label1": "foo", "label2": "bar"},
},
want: clusterapi.ExtraArgs{
"node-labels": "label1=foo,label2=bar",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.WorkerNodeLabelsExtraArgs(tt.wnc); !reflect.DeepEqual(got, tt.want) {
t.Errorf("WorkerNodeLabelsExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestCpNodeLabelsExtraArgs(t *testing.T) {
tests := []struct {
testName string
cpc v1alpha1.ControlPlaneConfiguration
want clusterapi.ExtraArgs
}{
{
testName: "no labels",
cpc: v1alpha1.ControlPlaneConfiguration{
Count: 3,
},
want: clusterapi.ExtraArgs{},
},
{
testName: "with labels",
cpc: v1alpha1.ControlPlaneConfiguration{
Count: 3,
Labels: map[string]string{"label1": "foo", "label2": "bar"},
},
want: clusterapi.ExtraArgs{
"node-labels": "label1=foo,label2=bar",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.ControlPlaneNodeLabelsExtraArgs(tt.cpc); !reflect.DeepEqual(got, tt.want) {
t.Errorf("ControlPlaneNodeLabelsExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestAppend(t *testing.T) {
tests := []struct {
testName string
e clusterapi.ExtraArgs
a clusterapi.ExtraArgs
want clusterapi.ExtraArgs
}{
{
testName: "initially empty",
e: clusterapi.ExtraArgs{},
a: clusterapi.ExtraArgs{
"key1": "value1",
},
want: clusterapi.ExtraArgs{
"key1": "value1",
},
},
{
testName: "initially not empty",
e: clusterapi.ExtraArgs{
"key1": "value1",
},
a: clusterapi.ExtraArgs{
"key2": "value2",
},
want: clusterapi.ExtraArgs{
"key1": "value1",
"key2": "value2",
},
},
{
testName: "append nil extraArgs",
e: clusterapi.ExtraArgs{
"key1": "value1",
},
a: nil,
want: clusterapi.ExtraArgs{
"key1": "value1",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := tt.e.Append(tt.a); !reflect.DeepEqual(got, tt.want) {
t.Errorf("ExtraArgs.Append() = %v, want %v", got, tt.want)
}
})
}
}
func TestNodeCIDRMaskExtraArgs(t *testing.T) {
nodeCidrMaskSize := new(int)
*nodeCidrMaskSize = 28
tests := []struct {
testName string
clusterNetwork *v1alpha1.ClusterNetwork
want clusterapi.ExtraArgs
}{
{
testName: "no cluster network config",
clusterNetwork: nil,
want: nil,
},
{
testName: "no nodes config",
clusterNetwork: &v1alpha1.ClusterNetwork{
Pods: v1alpha1.Pods{CidrBlocks: []string{"test", "test"}},
},
want: nil,
},
{
testName: "with nodes config",
clusterNetwork: &v1alpha1.ClusterNetwork{
Nodes: &v1alpha1.Nodes{CIDRMaskSize: nodeCidrMaskSize},
},
want: clusterapi.ExtraArgs{
"node-cidr-mask-size": "28",
},
},
{
testName: "with nodes config empty",
clusterNetwork: &v1alpha1.ClusterNetwork{
Nodes: &v1alpha1.Nodes{},
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.NodeCIDRMaskExtraArgs(tt.clusterNetwork); !reflect.DeepEqual(got, tt.want) {
t.Errorf("NodeCIDRMaskExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
func TestFeatureGatesExtraArgs(t *testing.T) {
tests := []struct {
testName string
features []string
want clusterapi.ExtraArgs
}{
{
testName: "no feature gates",
features: []string{},
want: nil,
},
{
testName: "single feature gate",
features: []string{"feature1=true"},
want: clusterapi.ExtraArgs{
"feature-gates": "feature1=true",
},
},
{
testName: "multiple feature gates",
features: []string{"feature1=true", "feature2=false", "feature3=true"},
want: clusterapi.ExtraArgs{
"feature-gates": "feature1=true,feature2=false,feature3=true",
},
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
if got := clusterapi.FeatureGatesExtraArgs(tt.features...); !reflect.DeepEqual(got, tt.want) {
t.Errorf("FeatureGatesExtraArgs() = %v, want %v", got, tt.want)
}
})
}
}
| 557 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
)
// KubeClient is a kubernetes API client.
type KubeClient interface {
Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error
}
// MachineDeploymentInCluster reads the MachineDeployment for a worker node group from the cluster, returning nil if it does not exist.
func MachineDeploymentInCluster(ctx context.Context, kubeclient KubeClient, clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) (*clusterv1.MachineDeployment, error) {
md := &clusterv1.MachineDeployment{}
err := kubeclient.Get(ctx, MachineDeploymentName(clusterSpec.Cluster, workerNodeGroupConfig), constants.EksaSystemNamespace, md)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return md, nil
}
// KubeadmConfigTemplateInCluster reads the KubeadmConfigTemplate referenced by a MachineDeployment from the cluster, returning nil if it does not exist.
func KubeadmConfigTemplateInCluster(ctx context.Context, kubeclient KubeClient, md *clusterv1.MachineDeployment) (*bootstrapv1.KubeadmConfigTemplate, error) {
if md == nil {
return nil, nil
}
kct := &bootstrapv1.KubeadmConfigTemplate{}
err := kubeclient.Get(ctx, md.Spec.Template.Spec.Bootstrap.ConfigRef.Name, constants.EksaSystemNamespace, kct)
if apierrors.IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
return kct, nil
}
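// exampleFetchWorkerObjects is an illustrative sketch, not part of the original
// source. It shows the typical chaining of the two helpers above: fetch the
// MachineDeployment for a worker node group, then fetch the KubeadmConfigTemplate
// it references. Both helpers return nil without an error when the object is not found.
func exampleFetchWorkerObjects(ctx context.Context, client KubeClient, spec *cluster.Spec, wng v1alpha1.WorkerNodeGroupConfiguration) (*bootstrapv1.KubeadmConfigTemplate, error) {
	md, err := MachineDeploymentInCluster(ctx, client, spec, wng)
	if err != nil {
		return nil, err
	}
	return KubeadmConfigTemplateInCluster(ctx, client, md)
}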
| 48 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clusterapi/mocks"
"github.com/aws/eks-anywhere/pkg/constants"
)
type fetchTest struct {
*WithT
ctx context.Context
kubeClient *mocks.MockKubeClient
clusterSpec *cluster.Spec
workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration
machineDeployment *clusterv1.MachineDeployment
}
func newFetchTest(t *testing.T) fetchTest {
ctrl := gomock.NewController(t)
kubeClient := mocks.NewMockKubeClient(ctrl)
wng := v1alpha1.WorkerNodeGroupConfiguration{
Name: "md-0",
}
md := &clusterv1.MachineDeployment{
Spec: clusterv1.MachineDeploymentSpec{
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
ConfigRef: &v1.ObjectReference{
Name: "snow-test-md-0-1",
},
},
},
},
},
}
return fetchTest{
WithT: NewWithT(t),
ctx: context.Background(),
kubeClient: kubeClient,
clusterSpec: givenClusterSpec(),
workerNodeGroupConfig: wng,
machineDeployment: md,
}
}
func TestMachineDeploymentInCluster(t *testing.T) {
g := newFetchTest(t)
g.kubeClient.EXPECT().
Get(
g.ctx,
"snow-test-md-0",
constants.EksaSystemNamespace,
&clusterv1.MachineDeployment{},
).
DoAndReturn(func(_ context.Context, _, _ string, obj *clusterv1.MachineDeployment) error {
g.machineDeployment.DeepCopyInto(obj)
return nil
})
got, err := clusterapi.MachineDeploymentInCluster(g.ctx, g.kubeClient, g.clusterSpec, g.workerNodeGroupConfig)
g.Expect(err).To(Succeed())
g.Expect(got).To(Equal(g.machineDeployment))
}
func TestMachineDeploymentInClusterNotExists(t *testing.T) {
g := newFetchTest(t)
g.kubeClient.EXPECT().
Get(
g.ctx,
"snow-test-md-0",
constants.EksaSystemNamespace,
&clusterv1.MachineDeployment{},
).
Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
got, err := clusterapi.MachineDeploymentInCluster(g.ctx, g.kubeClient, g.clusterSpec, g.workerNodeGroupConfig)
g.Expect(err).To(Succeed())
g.Expect(got).To(BeNil())
}
func TestMachineDeploymentInClusterError(t *testing.T) {
g := newFetchTest(t)
g.kubeClient.EXPECT().
Get(
g.ctx,
"snow-test-md-0",
constants.EksaSystemNamespace,
&clusterv1.MachineDeployment{},
).
Return(errors.New("get md error"))
got, err := clusterapi.MachineDeploymentInCluster(g.ctx, g.kubeClient, g.clusterSpec, g.workerNodeGroupConfig)
g.Expect(err).NotTo(Succeed())
g.Expect(got).To(BeNil())
}
func TestKubeadmConfigTemplateInCluster(t *testing.T) {
g := newFetchTest(t)
kct := &bootstrapv1.KubeadmConfigTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: "kct-1",
},
}
g.kubeClient.EXPECT().
Get(
g.ctx,
"snow-test-md-0-1",
constants.EksaSystemNamespace,
&bootstrapv1.KubeadmConfigTemplate{},
).
DoAndReturn(func(_ context.Context, _, _ string, obj *bootstrapv1.KubeadmConfigTemplate) error {
kct.DeepCopyInto(obj)
return nil
})
got, err := clusterapi.KubeadmConfigTemplateInCluster(g.ctx, g.kubeClient, g.machineDeployment)
g.Expect(err).To(Succeed())
g.Expect(got).To(Equal(kct))
}
func TestKubeadmConfigTemplateInClusterMachineDeploymentNil(t *testing.T) {
g := newFetchTest(t)
got, err := clusterapi.KubeadmConfigTemplateInCluster(g.ctx, g.kubeClient, nil)
g.Expect(err).To(Succeed())
g.Expect(got).To(BeNil())
}
func TestKubeadmConfigTemplateInClusterNotExists(t *testing.T) {
g := newFetchTest(t)
g.kubeClient.EXPECT().
Get(
g.ctx,
"snow-test-md-0-1",
constants.EksaSystemNamespace,
&bootstrapv1.KubeadmConfigTemplate{},
).
Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, ""))
got, err := clusterapi.KubeadmConfigTemplateInCluster(g.ctx, g.kubeClient, g.machineDeployment)
g.Expect(err).To(Succeed())
g.Expect(got).To(BeNil())
}
func TestKubeadmConfigTemplateInClusterError(t *testing.T) {
g := newFetchTest(t)
g.kubeClient.EXPECT().
Get(
g.ctx,
"snow-test-md-0-1",
constants.EksaSystemNamespace,
&bootstrapv1.KubeadmConfigTemplate{},
).
Return(errors.New("get kct error"))
got, err := clusterapi.KubeadmConfigTemplateInCluster(g.ctx, g.kubeClient, g.machineDeployment)
g.Expect(err).NotTo(Succeed())
g.Expect(got).To(BeNil())
}
| 175 |
eks-anywhere | aws | Go | package clusterapi
import (
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
)
const awsIamKubeconfig = `
# clusters refers to the remote service.
clusters:
- name: aws-iam-authenticator
cluster:
certificate-authority: /var/aws-iam-authenticator/cert.pem
server: https://localhost:21362/authenticate
# users refers to the API Server's webhook configuration
# (we don't need to authenticate the API server).
users:
- name: apiserver
# kubeconfig files require a context. Provide one for the API Server.
current-context: webhook
contexts:
- name: webhook
context:
cluster: aws-iam-authenticator
user: apiserver
`
var awsIamMounts = []bootstrapv1.HostPathMount{
{
Name: "authconfig",
HostPath: "/var/lib/kubeadm/aws-iam-authenticator/",
MountPath: "/etc/kubernetes/aws-iam-authenticator/",
ReadOnly: false,
},
{
Name: "awsiamcert",
HostPath: "/var/lib/kubeadm/aws-iam-authenticator/pki/",
MountPath: "/var/aws-iam-authenticator/",
ReadOnly: false,
},
}
var awsIamFiles = []bootstrapv1.File{
{
Path: "/var/lib/kubeadm/aws-iam-authenticator/kubeconfig.yaml",
Owner: "root:root",
Permissions: "0640",
Content: awsIamKubeconfig,
},
{
Path: "/var/lib/kubeadm/aws-iam-authenticator/pki/cert.pem",
Owner: "root:root",
Permissions: "0640",
ContentFrom: &bootstrapv1.FileSource{
Secret: bootstrapv1.SecretFileSource{
Name: "test-cluster-aws-iam-authenticator-ca",
Key: "cert.pem",
},
},
},
{
Path: "/var/lib/kubeadm/aws-iam-authenticator/pki/key.pem",
Owner: "root:root",
Permissions: "0640",
ContentFrom: &bootstrapv1.FileSource{
Secret: bootstrapv1.SecretFileSource{
Name: "test-cluster-aws-iam-authenticator-ca",
Key: "key.pem",
},
},
},
}
func configureAWSIAMAuthInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, awsIamConfig *v1alpha1.AWSIamConfig) {
if awsIamConfig == nil {
return
}
apiServerExtraArgs := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer.ExtraArgs
for k, v := range AwsIamAuthExtraArgs(awsIamConfig) {
apiServerExtraArgs[k] = v
}
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer.ExtraVolumes = append(
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer.ExtraVolumes,
awsIamMounts...,
)
kcp.Spec.KubeadmConfigSpec.Files = append(kcp.Spec.KubeadmConfigSpec.Files, awsIamFiles...)
}
func configureOIDCInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, oidcConfig *v1alpha1.OIDCConfig) {
if oidcConfig == nil {
return
}
apiServerExtraArgs := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer.ExtraArgs
for k, v := range OIDCToExtraArgs(oidcConfig) {
apiServerExtraArgs[k] = v
}
}
func configurePodIamAuthInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, podIamConfig *v1alpha1.PodIAMConfig) {
if podIamConfig == nil {
return
}
apiServerExtraArgs := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.APIServer.ExtraArgs
for k, v := range PodIAMAuthExtraArgs(podIamConfig) {
apiServerExtraArgs[k] = v
}
}
func SetIdentityAuthInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, clusterSpec *cluster.Spec) {
configureOIDCInKubeadmControlPlane(kcp, clusterSpec.OIDCConfig)
configureAWSIAMAuthInKubeadmControlPlane(kcp, clusterSpec.AWSIamConfig)
configurePodIamAuthInKubeadmControlPlane(kcp, clusterSpec.Cluster.Spec.PodIAMConfig)
}
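// exampleIdentityConfiguredKCP is an illustrative sketch, not part of the original
// source. SetIdentityAuthInKubeadmControlPlane is the single entry point that wires
// OIDC, AWS IAM Authenticator and pod IAM settings into an existing KubeadmControlPlane;
// each sub-configurator is a no-op when its config is nil. The KCP is assumed to
// already have a non-nil ClusterConfiguration with initialized APIServer extra args
// when any identity config is set.
func exampleIdentityConfiguredKCP(kcp *controlplanev1.KubeadmControlPlane, clusterSpec *cluster.Spec) {
	SetIdentityAuthInKubeadmControlPlane(kcp, clusterSpec)
}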
| 122 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestConfigureAWSIAMAuthInKubeadmControlPlane(t *testing.T) {
replicas := int32(3)
tests := []struct {
name string
awsIamConfig *v1alpha1.AWSIamConfig
want *controlplanev1.KubeadmControlPlane
}{
{
name: "no iam auth",
awsIamConfig: nil,
want: wantKubeadmControlPlane(),
},
{
name: "with iam auth",
awsIamConfig: &v1alpha1.AWSIamConfig{
Spec: v1alpha1.AWSIamConfigSpec{
AWSRegion: "test-region",
BackendMode: []string{"mode1", "mode2"},
MapRoles: []v1alpha1.MapRoles{
{
RoleARN: "test-role-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
MapUsers: []v1alpha1.MapUsers{
{
UserARN: "test-user-arn",
Username: "test",
Groups: []string{"group1", "group2"},
},
},
Partition: "aws",
},
},
want: &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmControlPlane",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "eksa-system",
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
DNS: bootstrapv1.DNS{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/coredns",
ImageTag: "v1.8.4-eks-1-21-9",
},
},
Etcd: bootstrapv1.Etcd{
Local: &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/etcd-io",
ImageTag: "v3.4.16-eks-1-21-9",
},
ExtraArgs: map[string]string{
"cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{
"authentication-token-webhook-config-file": "/etc/kubernetes/aws-iam-authenticator/kubeconfig.yaml",
},
ExtraVolumes: []bootstrapv1.HostPathMount{
{
Name: "authconfig",
HostPath: "/var/lib/kubeadm/aws-iam-authenticator/",
MountPath: "/etc/kubernetes/aws-iam-authenticator/",
ReadOnly: false,
},
{
Name: "awsiamcert",
HostPath: "/var/lib/kubeadm/aws-iam-authenticator/pki/",
MountPath: "/var/aws-iam-authenticator/",
ReadOnly: false,
},
},
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: tlsCipherSuitesArgs(),
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{
{
Path: "/var/lib/kubeadm/aws-iam-authenticator/kubeconfig.yaml",
Owner: "root:root",
Permissions: "0640",
Content: `
# clusters refers to the remote service.
clusters:
- name: aws-iam-authenticator
cluster:
certificate-authority: /var/aws-iam-authenticator/cert.pem
server: https://localhost:21362/authenticate
# users refers to the API Server's webhook configuration
# (we don't need to authenticate the API server).
users:
- name: apiserver
# kubeconfig files require a context. Provide one for the API Server.
current-context: webhook
contexts:
- name: webhook
context:
cluster: aws-iam-authenticator
user: apiserver
`,
},
{
Path: "/var/lib/kubeadm/aws-iam-authenticator/pki/cert.pem",
Owner: "root:root",
Permissions: "0640",
ContentFrom: &bootstrapv1.FileSource{
Secret: bootstrapv1.SecretFileSource{
Name: "test-cluster-aws-iam-authenticator-ca",
Key: "cert.pem",
},
},
},
{
Path: "/var/lib/kubeadm/aws-iam-authenticator/pki/key.pem",
Owner: "root:root",
Permissions: "0640",
ContentFrom: &bootstrapv1.FileSource{
Secret: bootstrapv1.SecretFileSource{
Name: "test-cluster-aws-iam-authenticator-ca",
Key: "key.pem",
},
},
},
},
},
Replicas: &replicas,
Version: "v1.21.5-eks-1-21-9",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
g.clusterSpec.AWSIamConfig = tt.awsIamConfig
clusterapi.SetIdentityAuthInKubeadmControlPlane(got, g.clusterSpec)
g.Expect(got).To(Equal(tt.want))
})
}
}
func TestConfigureOIDCInKubeadmControlPlane(t *testing.T) {
replicas := int32(3)
tests := []struct {
name string
oidcConfig *v1alpha1.OIDCConfig
want *controlplanev1.KubeadmControlPlane
}{
{
name: "no oidc",
oidcConfig: nil,
want: wantKubeadmControlPlane(),
},
{
name: "with oidc",
oidcConfig: &v1alpha1.OIDCConfig{
TypeMeta: metav1.TypeMeta{
Kind: "OIDCConfig",
APIVersion: v1alpha1.SchemeBuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-unit-test",
},
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "id1",
GroupsClaim: "claim1",
GroupsPrefix: "prefix-for-groups",
IssuerUrl: "https://mydomain.com/issuer",
RequiredClaims: []v1alpha1.OIDCConfigRequiredClaim{
{
Claim: "sub",
Value: "test",
},
},
UsernameClaim: "username-claim",
UsernamePrefix: "username-prefix",
},
},
want: &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmControlPlane",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "eksa-system",
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
DNS: bootstrapv1.DNS{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/coredns",
ImageTag: "v1.8.4-eks-1-21-9",
},
},
Etcd: bootstrapv1.Etcd{
Local: &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/etcd-io",
ImageTag: "v3.4.16-eks-1-21-9",
},
ExtraArgs: map[string]string{
"cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{
"oidc-client-id": "id1",
"oidc-groups-claim": "claim1",
"oidc-groups-prefix": "prefix-for-groups",
"oidc-issuer-url": "https://mydomain.com/issuer",
"oidc-required-claim": "sub=test",
"oidc-username-claim": "username-claim",
"oidc-username-prefix": "username-prefix",
},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: tlsCipherSuitesArgs(),
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{},
},
Replicas: &replicas,
Version: "v1.21.5-eks-1-21-9",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
g.clusterSpec.OIDCConfig = tt.oidcConfig
clusterapi.SetIdentityAuthInKubeadmControlPlane(got, g.clusterSpec)
g.Expect(got).To(Equal(tt.want))
})
}
}
func TestConfigurePodIamAuthInKubeadmControlPlane(t *testing.T) {
replicas := int32(3)
tests := []struct {
name string
podIAMConfig *v1alpha1.PodIAMConfig
want *controlplanev1.KubeadmControlPlane
}{
{
name: "no pod iam",
podIAMConfig: nil,
want: wantKubeadmControlPlane(),
},
{
name: "with pod iam",
podIAMConfig: &v1alpha1.PodIAMConfig{
ServiceAccountIssuer: "https://test",
},
want: &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
Kind: "KubeadmControlPlane",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: "eksa-system",
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: v1.ObjectReference{
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
Kind: "ProviderMachineTemplate",
Name: "provider-template",
},
},
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
DNS: bootstrapv1.DNS{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/coredns",
ImageTag: "v1.8.4-eks-1-21-9",
},
},
Etcd: bootstrapv1.Etcd{
Local: &bootstrapv1.LocalEtcd{
ImageMeta: bootstrapv1.ImageMeta{
ImageRepository: "public.ecr.aws/eks-distro/etcd-io",
ImageTag: "v3.4.16-eks-1-21-9",
},
ExtraArgs: map[string]string{
"cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
},
},
},
APIServer: bootstrapv1.APIServer{
ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{
"service-account-issuer": "https://test",
},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
ControllerManager: bootstrapv1.ControlPlaneComponent{
ExtraArgs: tlsCipherSuitesArgs(),
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
Scheduler: bootstrapv1.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []bootstrapv1.HostPathMount{},
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
JoinConfiguration: &bootstrapv1.JoinConfiguration{
NodeRegistration: bootstrapv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"node-labels": "key1=val1,key2=val2",
},
Taints: []v1.Taint{
{
Key: "key1",
Value: "val1",
Effect: v1.TaintEffectNoExecute,
TimeAdded: nil,
},
},
},
},
PreKubeadmCommands: []string{},
PostKubeadmCommands: []string{},
Files: []bootstrapv1.File{},
},
Replicas: &replicas,
Version: "v1.21.5-eks-1-21-9",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
g.clusterSpec.Cluster.Spec.PodIAMConfig = tt.podIAMConfig
clusterapi.SetIdentityAuthInKubeadmControlPlane(got, g.clusterSpec)
g.Expect(got).To(Equal(tt.want))
})
}
}
| 491 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
)
// Installer ensures the CAPI etcdadm providers are installed on a management cluster.
type Installer struct {
*clients
}
// NewInstaller builds an Installer from the given CAPI and kubectl clients.
func NewInstaller(capiClient CAPIClient, kubectlClient KubectlClient) *Installer {
return &Installer{
clients: &clients{
capiClient: capiClient,
kubectlClient: kubectlClient,
},
}
}
// EnsureEtcdProvidersInstallation installs the etcdadm bootstrap and controller providers on the management cluster if they are not already present.
func (i *Installer) EnsureEtcdProvidersInstallation(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, currSpec *cluster.Spec) error {
if !currSpec.Cluster.IsSelfManaged() {
logger.V(1).Info("Not a management cluster, skipping check for CAPI etcd providers")
return nil
}
var installProviders []string
etcdBootstrapExists, err := i.kubectlClient.CheckProviderExists(ctx, managementCluster.KubeconfigFile, constants.EtcdAdmBootstrapProviderName, constants.EtcdAdmBootstrapProviderSystemNamespace)
if err != nil {
return err
}
if !etcdBootstrapExists {
installProviders = append(installProviders, constants.EtcdAdmBootstrapProviderName)
}
etcdControllerExists, err := i.kubectlClient.CheckProviderExists(ctx, managementCluster.KubeconfigFile, constants.EtcdadmControllerProviderName, constants.EtcdAdmControllerSystemNamespace)
if err != nil {
return err
}
if !etcdControllerExists {
installProviders = append(installProviders, constants.EtcdadmControllerProviderName)
}
if len(installProviders) > 0 {
return i.capiClient.InstallEtcdadmProviders(ctx, currSpec, managementCluster, provider, installProviders)
}
return nil
}
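// exampleEnsureEtcdProviders is an illustrative sketch, not part of the original
// source. It shows how the Installer is typically constructed from existing CAPI and
// kubectl clients and used to make sure the etcdadm providers are present on a
// management cluster before an upgrade. All arguments are assumed to be built elsewhere.
func exampleEnsureEtcdProviders(ctx context.Context, capiClient CAPIClient, kubectlClient KubectlClient, mgmt *types.Cluster, provider providers.Provider, spec *cluster.Spec) error {
	installer := NewInstaller(capiClient, kubectlClient)
	return installer.EnsureEtcdProvidersInstallation(ctx, mgmt, provider, spec)
}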
| 53 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clusterapi/mocks"
"github.com/aws/eks-anywhere/pkg/constants"
providerMocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
)
type installerTest struct {
*WithT
ctx context.Context
capiClient *mocks.MockCAPIClient
kubectlClient *mocks.MockKubectlClient
installer *clusterapi.Installer
currentSpec *cluster.Spec
cluster *types.Cluster
provider *providerMocks.MockProvider
}
func newInstallerTest(t *testing.T) installerTest {
ctrl := gomock.NewController(t)
capiClient := mocks.NewMockCAPIClient(ctrl)
kubectlClient := mocks.NewMockKubectlClient(ctrl)
currentSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Bundles.Spec.Number = 1
s.VersionsBundle.ExternalEtcdBootstrap.Version = "v0.1.0"
s.VersionsBundle.ExternalEtcdController.Version = "v0.1.0"
})
return installerTest{
WithT: NewWithT(t),
ctx: context.Background(),
capiClient: capiClient,
kubectlClient: kubectlClient,
installer: clusterapi.NewInstaller(capiClient, kubectlClient),
currentSpec: currentSpec,
provider: providerMocks.NewMockProvider(ctrl),
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "k.kubeconfig",
},
}
}
func TestEnsureEtcdProviderInstallStackedEtcd(t *testing.T) {
tt := newInstallerTest(t)
tt.kubectlClient.EXPECT().CheckProviderExists(tt.ctx, tt.cluster.KubeconfigFile, constants.EtcdAdmBootstrapProviderName, constants.EtcdAdmBootstrapProviderSystemNamespace).Return(false, nil)
tt.kubectlClient.EXPECT().CheckProviderExists(tt.ctx, tt.cluster.KubeconfigFile, constants.EtcdadmControllerProviderName, constants.EtcdAdmControllerSystemNamespace).Return(false, nil)
tt.capiClient.EXPECT().InstallEtcdadmProviders(tt.ctx, tt.currentSpec, tt.cluster, tt.provider, []string{constants.EtcdAdmBootstrapProviderName, constants.EtcdadmControllerProviderName})
	tt.Expect(tt.installer.EnsureEtcdProvidersInstallation(tt.ctx, tt.cluster, tt.provider, tt.currentSpec)).To(Succeed())
}
func TestEnsureEtcdProviderInstallExternalEtcd(t *testing.T) {
tt := newInstallerTest(t)
tt.kubectlClient.EXPECT().CheckProviderExists(tt.ctx, tt.cluster.KubeconfigFile, constants.EtcdAdmBootstrapProviderName, constants.EtcdAdmBootstrapProviderSystemNamespace).Return(true, nil)
tt.kubectlClient.EXPECT().CheckProviderExists(tt.ctx, tt.cluster.KubeconfigFile, constants.EtcdadmControllerProviderName, constants.EtcdAdmControllerSystemNamespace).Return(true, nil)
	tt.Expect(tt.installer.EnsureEtcdProvidersInstallation(tt.ctx, tt.cluster, tt.provider, tt.currentSpec)).To(Succeed())
}
| 74 |
eks-anywhere | aws | Go | package clusterapi
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/constants"
)
// SetKubeVipInKubeadmControlPlane appends kube-vip manifest to kubeadmControlPlane's kubeadmConfigSpec files.
func SetKubeVipInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, address, image string) error {
b, err := yaml.Marshal(kubeVip(address, image))
if err != nil {
return fmt.Errorf("marshalling kube-vip pod: %v", err)
}
kcp.Spec.KubeadmConfigSpec.Files = append(kcp.Spec.KubeadmConfigSpec.Files, bootstrapv1.File{
Path: "/etc/kubernetes/manifests/kube-vip.yaml",
Owner: "root:root",
Content: string(b),
})
return nil
}
func kubeVip(address, image string) *corev1.Pod {
return &corev1.Pod{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Pod",
},
ObjectMeta: metav1.ObjectMeta{
Name: "kube-vip",
Namespace: constants.KubeSystemNamespace,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "kube-vip",
Image: image,
Args: []string{"manager"},
Env: []corev1.EnvVar{
{
Name: "vip_arp",
Value: "true",
},
{
Name: "port",
Value: "6443",
},
{
Name: "vip_cidr",
Value: "32",
},
{
Name: "cp_enable",
Value: "true",
},
{
Name: "cp_namespace",
Value: "kube-system",
},
{
Name: "vip_ddns",
Value: "false",
},
{
Name: "vip_leaderelection",
Value: "true",
},
{
Name: "vip_leaseduration",
Value: "15",
},
{
Name: "vip_renewdeadline",
Value: "10",
},
{
Name: "vip_retryperiod",
Value: "2",
},
{
Name: "address",
Value: address,
},
},
ImagePullPolicy: corev1.PullIfNotPresent,
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"NET_ADMIN",
"NET_RAW",
},
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "kubeconfig",
MountPath: "/etc/kubernetes/admin.conf",
},
},
},
},
HostNetwork: true,
Volumes: []corev1.Volume{
{
Name: "kubeconfig",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc/kubernetes/admin.conf",
},
},
},
},
},
}
}
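// exampleAddKubeVip is an illustrative sketch, not part of the original source.
// It shows a typical call to SetKubeVipInKubeadmControlPlane with the control plane
// endpoint host and a kube-vip image URI; both values here are hypothetical.
func exampleAddKubeVip(kcp *controlplanev1.KubeadmControlPlane) error {
	return SetKubeVipInKubeadmControlPlane(kcp, "10.0.0.10", "public.ecr.aws/example/kube-vip:v0.3.7")
}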
| 124 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
. "github.com/onsi/gomega"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestSetKubeVipInKubeadmControlPlane(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
want := got.DeepCopy()
want.Spec.KubeadmConfigSpec.Files = []bootstrapv1.File{
{
Path: "/etc/kubernetes/manifests/kube-vip.yaml",
Owner: "root:root",
Content: test.KubeVipTemplate,
},
}
g.Expect(clusterapi.SetKubeVipInKubeadmControlPlane(got, g.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, "public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.1433")).To(Succeed())
g.Expect(got).To(Equal(want))
}
| 28 |
eks-anywhere | aws | Go | package clusterapi
import (
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
)
const (
machineHealthCheckKind = "MachineHealthCheck"
maxUnhealthyControlPlane = "100%"
maxUnhealthyWorker = "40%"
)
func machineHealthCheck(clusterName string, unhealthyTimeout, nodeStartupTimeout time.Duration) *clusterv1.MachineHealthCheck {
return &clusterv1.MachineHealthCheck{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterAPIVersion,
Kind: machineHealthCheckKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: constants.EksaSystemNamespace,
},
Spec: clusterv1.MachineHealthCheckSpec{
ClusterName: clusterName,
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{},
},
NodeStartupTimeout: &metav1.Duration{Duration: nodeStartupTimeout},
UnhealthyConditions: []clusterv1.UnhealthyCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionUnknown,
Timeout: metav1.Duration{Duration: unhealthyTimeout},
},
{
Type: corev1.NodeReady,
Status: corev1.ConditionFalse,
Timeout: metav1.Duration{Duration: unhealthyTimeout},
},
},
},
}
}
// MachineHealthCheckForControlPlane creates a MachineHealthCheck resource for the control plane.
func MachineHealthCheckForControlPlane(clusterSpec *cluster.Spec, unhealthyTimeout, nodeStartupTimeout time.Duration) *clusterv1.MachineHealthCheck {
mhc := machineHealthCheck(ClusterName(clusterSpec.Cluster), unhealthyTimeout, nodeStartupTimeout)
mhc.SetName(ControlPlaneMachineHealthCheckName(clusterSpec))
mhc.Spec.Selector.MatchLabels[clusterv1.MachineControlPlaneLabel] = ""
maxUnhealthy := intstr.Parse(maxUnhealthyControlPlane)
mhc.Spec.MaxUnhealthy = &maxUnhealthy
return mhc
}
// MachineHealthCheckForWorkers creates MachineHealthCheck resources for the workers.
func MachineHealthCheckForWorkers(clusterSpec *cluster.Spec, unhealthyTimeout, nodeStartupTimeout time.Duration) []*clusterv1.MachineHealthCheck {
m := make([]*clusterv1.MachineHealthCheck, 0, len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations))
for _, workerNodeGroupConfig := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
mhc := machineHealthCheckForWorker(clusterSpec, workerNodeGroupConfig, unhealthyTimeout, nodeStartupTimeout)
m = append(m, mhc)
}
return m
}
func machineHealthCheckForWorker(clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration, unhealthyTimeout, nodeStartupTimeout time.Duration) *clusterv1.MachineHealthCheck {
mhc := machineHealthCheck(ClusterName(clusterSpec.Cluster), unhealthyTimeout, nodeStartupTimeout)
mhc.SetName(WorkerMachineHealthCheckName(clusterSpec, workerNodeGroupConfig))
mhc.Spec.Selector.MatchLabels[clusterv1.MachineDeploymentNameLabel] = MachineDeploymentName(clusterSpec.Cluster, workerNodeGroupConfig)
maxUnhealthy := intstr.Parse(maxUnhealthyWorker)
mhc.Spec.MaxUnhealthy = &maxUnhealthy
return mhc
}
// MachineHealthCheckObjects creates MachineHealthCheck resources for control plane and all the worker node groups.
func MachineHealthCheckObjects(clusterSpec *cluster.Spec, unhealthyTimeout, nodeStartupTimeout time.Duration) []runtime.Object {
mhcWorkers := MachineHealthCheckForWorkers(clusterSpec, unhealthyTimeout, nodeStartupTimeout)
o := make([]runtime.Object, 0, len(mhcWorkers)+1)
for _, item := range mhcWorkers {
o = append(o, item)
}
return append(o, MachineHealthCheckForControlPlane(clusterSpec, unhealthyTimeout, nodeStartupTimeout))
}
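// exampleMachineHealthChecks is an illustrative sketch, not part of the original
// source. A single call to MachineHealthCheckObjects yields the worker MachineHealthChecks
// followed by the control plane MachineHealthCheck; the timeout values here are
// hypothetical defaults.
func exampleMachineHealthChecks(clusterSpec *cluster.Spec) []runtime.Object {
	return MachineHealthCheckObjects(clusterSpec, 5*time.Minute, 10*time.Minute)
}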
| 93 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
"time"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
)
func TestMachineHealthCheckForControlPlane(t *testing.T) {
timeouts := []time.Duration{5 * time.Minute, time.Hour, 30 * time.Second}
for _, timeout := range timeouts {
tt := newApiBuilerTest(t)
want := expectedMachineHealthCheckForControlPlane(timeout)
got := clusterapi.MachineHealthCheckForControlPlane(tt.clusterSpec, timeout, timeout)
tt.Expect(got).To(BeComparableTo(want))
}
}
func expectedMachineHealthCheckForControlPlane(timeout time.Duration) *clusterv1.MachineHealthCheck {
maxUnhealthy := intstr.Parse("100%")
return &clusterv1.MachineHealthCheck{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "MachineHealthCheck",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-kcp-unhealthy",
Namespace: constants.EksaSystemNamespace,
},
Spec: clusterv1.MachineHealthCheckSpec{
ClusterName: "test-cluster",
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster.x-k8s.io/control-plane": "",
},
},
MaxUnhealthy: &maxUnhealthy,
NodeStartupTimeout: &metav1.Duration{Duration: timeout},
UnhealthyConditions: []clusterv1.UnhealthyCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionUnknown,
Timeout: metav1.Duration{Duration: timeout},
},
{
Type: corev1.NodeReady,
Status: corev1.ConditionFalse,
Timeout: metav1.Duration{Duration: timeout},
},
},
},
}
}
func TestMachineHealthCheckForWorkers(t *testing.T) {
timeouts := []time.Duration{5 * time.Minute, time.Hour, 30 * time.Second}
for _, timeout := range timeouts {
tt := newApiBuilerTest(t)
tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{*tt.workerNodeGroupConfig}
want := expectedMachineHealthCheckForWorkers(timeout)
got := clusterapi.MachineHealthCheckForWorkers(tt.clusterSpec, timeout, timeout)
tt.Expect(got).To(Equal(want))
}
}
func expectedMachineHealthCheckForWorkers(timeout time.Duration) []*clusterv1.MachineHealthCheck {
maxUnhealthy := intstr.Parse("40%")
return []*clusterv1.MachineHealthCheck{
{
TypeMeta: metav1.TypeMeta{
APIVersion: "cluster.x-k8s.io/v1beta1",
Kind: "MachineHealthCheck",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster-wng-1-worker-unhealthy",
Namespace: constants.EksaSystemNamespace,
},
Spec: clusterv1.MachineHealthCheckSpec{
ClusterName: "test-cluster",
Selector: metav1.LabelSelector{
MatchLabels: map[string]string{
"cluster.x-k8s.io/deployment-name": "test-cluster-wng-1",
},
},
MaxUnhealthy: &maxUnhealthy,
NodeStartupTimeout: &metav1.Duration{Duration: timeout},
UnhealthyConditions: []clusterv1.UnhealthyCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionUnknown,
Timeout: metav1.Duration{Duration: timeout},
},
{
Type: corev1.NodeReady,
Status: corev1.ConditionFalse,
Timeout: metav1.Duration{Duration: timeout},
},
},
},
},
}
}
func TestMachineHealthCheckObjects(t *testing.T) {
tt := newApiBuilerTest(t)
tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{*tt.workerNodeGroupConfig}
timeout := 5 * time.Minute
wantWN := clusterapi.MachineHealthCheckForWorkers(tt.clusterSpec, timeout, timeout)
wantCP := clusterapi.MachineHealthCheckForControlPlane(tt.clusterSpec, timeout, timeout)
got := clusterapi.MachineHealthCheckObjects(tt.clusterSpec, timeout, timeout)
tt.Expect(got).To(Equal([]runtime.Object{wantWN[0], wantCP}))
}
| 125 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
)
// Manager bundles the CAPI Installer and Upgrader.
type Manager struct {
*Installer
*Upgrader
}
type clients struct {
capiClient CAPIClient
kubectlClient KubectlClient
}
// NewManager builds a Manager from the given CAPI and kubectl clients.
func NewManager(capiClient CAPIClient, kubectlClient KubectlClient) *Manager {
return &Manager{
Installer: NewInstaller(capiClient, kubectlClient),
Upgrader: NewUpgrader(capiClient, kubectlClient),
}
}
// CAPIClient upgrades CAPI components and installs the etcdadm providers on a management cluster.
type CAPIClient interface {
Upgrade(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, newSpec *cluster.Spec, changeDiff *CAPIChangeDiff) error
InstallEtcdadmProviders(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider, installProviders []string) error
}
// KubectlClient checks whether a CAPI provider exists in a cluster.
type KubectlClient interface {
CheckProviderExists(ctx context.Context, kubeconfigFile, name, namespace string) (bool, error)
}
| 36 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
"fmt"
"regexp"
"strconv"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
)
var nameRegex = regexp.MustCompile(`(.*?)(-)(\d+)$`)
// Object represents a kubernetes API object.
type Object[O kubernetes.Object] interface {
kubernetes.Object
DeepCopy() O
}
// ObjectComparator returns true only if both kubernetes Objects are identical.
// Most of the time, this only requires comparing the Spec field, but that can vary
// from object to object.
type ObjectComparator[O Object[O]] func(current, new O) bool
// ObjectRetriever gets a kubernetes API object using the provided client.
// If the object doesn't exist, it returns a NotFound error.
type ObjectRetriever[O Object[O]] func(ctx context.Context, client kubernetes.Client, name, namespace string) (O, error)
// IncrementName takes an object name and increments the suffix number by one.
// This method is used for updating objects (e.g. machinetemplate, kubeadmconfigtemplate) that are either immutable
// or require recreation to trigger a machine rollout. The original object name should follow the convention of an
// alphanumeric base followed by a dash and digits, e.g. abc-1, md-0, kct-2. An error is returned if the original name does not follow
// this pattern.
func IncrementName(name string) (string, error) {
match := nameRegex.FindStringSubmatch(name)
if match == nil {
return "", fmt.Errorf(`invalid format of name [name=%s]. Name has to follow regex pattern "(-)(\d+)$", e.g. machinetemplate-cp-1`, name)
}
n, err := strconv.Atoi(match[3])
if err != nil {
return "", fmt.Errorf("converting object suffix to int: %v", err)
}
return ObjectName(match[1], n+1), nil
}
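// Illustrative sketch (not part of the original source): expected IncrementName behavior on a few
// inputs, assuming the nameRegex and ObjectName helpers defined in this file.
func incrementNameSketch() {
	next, _ := IncrementName("md-0")
	fmt.Println(next) // "md-1"
	next, _ = IncrementName("machinetemplate-cp-3")
	fmt.Println(next) // "machinetemplate-cp-4"
	if _, err := IncrementName("no-numeric-suffix"); err != nil {
		// Names without a trailing "-<number>" are rejected with an "invalid format of name" error.
		fmt.Println(err)
	}
}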
// IncrementNameWithFallbackDefault calls IncrementName and falls back to the provided default name if IncrementName
// returns an error. This method accommodates objects whose name format changed in a previous version.
// For example, in beta CAPI the snowmachinetemplate is named after the eks-a snowmachineconfig name, without the '-1' suffix.
// We set the object name to the default new machinetemplate name after detecting the invalid old name.
func IncrementNameWithFallbackDefault(name, defaultName string) string {
n, err := IncrementName(name)
if err != nil {
logger.V(4).Info("Unable to increment object name (might due to changes of name format), fallback to the default name", "error", err.Error())
return defaultName
}
return n
}
// ObjectName builds a versioned object name by appending the version number to the base name.
func ObjectName(baseName string, version int) string {
return fmt.Sprintf("%s-%d", baseName, version)
}
// DefaultObjectName builds the initial (version 1) object name for the given base name.
func DefaultObjectName(baseName string) string {
return ObjectName(baseName, 1)
}
// KubeadmControlPlaneName generates the kubeadmControlPlane name for an EKSA Cluster.
func KubeadmControlPlaneName(cluster *v1alpha1.Cluster) string {
return cluster.GetName()
}
// EtcdClusterName sets the default EtcdCluster object name.
func EtcdClusterName(clusterName string) string {
return fmt.Sprintf("%s-etcd", clusterName)
}
// MachineDeploymentName returns the name for the corresponding MachineDeployment to an EKS-A worker node group.
func MachineDeploymentName(cluster *v1alpha1.Cluster, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) string {
	// Adding the cluster name prefix guarantees machine deployment name uniqueness
	// among clusters managed by the same management cluster.
return clusterWorkerNodeGroupName(cluster, workerNodeGroupConfig)
}
// DefaultKubeadmConfigTemplateName returns the initial KubeadmConfigTemplate name for an EKS-A worker node group.
func DefaultKubeadmConfigTemplateName(clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) string {
return DefaultObjectName(clusterWorkerNodeGroupName(clusterSpec.Cluster, workerNodeGroupConfig))
}
func clusterWorkerNodeGroupName(cluster *v1alpha1.Cluster, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) string {
return fmt.Sprintf("%s-%s", cluster.Name, workerNodeGroupConfig.Name)
}
// ControlPlaneMachineTemplateName sets the default object name on the control plane machine template.
func ControlPlaneMachineTemplateName(cluster *v1alpha1.Cluster) string {
return DefaultObjectName(fmt.Sprintf("%s-control-plane", cluster.Name))
}
// EtcdMachineTemplateName sets the default object name on the etcd machine template.
func EtcdMachineTemplateName(cluster *v1alpha1.Cluster) string {
return DefaultObjectName(fmt.Sprintf("%s-etcd", cluster.Name))
}
// WorkerMachineTemplateName returns the initial machine template name for an EKS-A worker node group.
func WorkerMachineTemplateName(clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) string {
return DefaultObjectName(fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfig.Name))
}
// ControlPlaneMachineHealthCheckName returns the name for the control plane MachineHealthCheck.
func ControlPlaneMachineHealthCheckName(clusterSpec *cluster.Spec) string {
return fmt.Sprintf("%s-kcp-unhealthy", KubeadmControlPlaneName(clusterSpec.Cluster))
}
// WorkerMachineHealthCheckName returns the name for a worker node group MachineHealthCheck.
func WorkerMachineHealthCheckName(clusterSpec *cluster.Spec, workerNodeGroupConfig v1alpha1.WorkerNodeGroupConfiguration) string {
return fmt.Sprintf("%s-worker-unhealthy", MachineDeploymentName(clusterSpec.Cluster, workerNodeGroupConfig))
}
// InitialTemplateNamesForWorkers returns the default initial names for worker machine templates and kubeadm config templates.
func InitialTemplateNamesForWorkers(clusterSpec *cluster.Spec) (machineTemplateNames, kubeadmConfigTemplateNames map[string]string) {
workerLen := len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)
workloadTemplateNames := make(map[string]string, workerLen)
kubeadmConfigTemplateNames = make(map[string]string, workerLen)
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workloadTemplateNames[workerNodeGroupConfiguration.Name] = WorkerMachineTemplateName(clusterSpec, workerNodeGroupConfiguration)
kubeadmConfigTemplateNames[workerNodeGroupConfiguration.Name] = DefaultKubeadmConfigTemplateName(clusterSpec, workerNodeGroupConfiguration)
}
return workloadTemplateNames, kubeadmConfigTemplateNames
}
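// Illustrative sketch (not part of the original source): for a cluster named "mgmt" with worker
// node groups "md-0" and "md-1", both returned maps map each group name to "mgmt-<group>-1",
// i.e. every template starts at the "-1" suffix produced by DefaultObjectName.
func initialTemplateNamesForWorkersSketch(clusterSpec *cluster.Spec) (map[string]string, map[string]string) {
	machineTemplateNames, kubeadmConfigTemplateNames := InitialTemplateNamesForWorkers(clusterSpec)
	return machineTemplateNames, kubeadmConfigTemplateNames
}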
// EnsureNewNameIfChanged increments an object's name when an object with the same name already exists in the cluster but differs from the desired object.
func EnsureNewNameIfChanged[M Object[M]](ctx context.Context,
client kubernetes.Client,
retrieve ObjectRetriever[M],
equal ObjectComparator[M],
new M,
) error {
current, err := retrieve(ctx, client, new.GetName(), new.GetNamespace())
if apierrors.IsNotFound(err) {
		// if an object with the same name doesn't exist in the same namespace, there is no conflict and nothing to compare
return nil
}
if err != nil {
return errors.Wrapf(err, "reading %s %s/%s from API",
new.GetObjectKind().GroupVersionKind().Kind,
new.GetNamespace(),
new.GetName(),
)
}
if !equal(new, current) {
newName, err := IncrementName(new.GetName())
if err != nil {
return errors.Wrapf(err, "incrementing name for %s %s/%s",
new.GetObjectKind().GroupVersionKind().Kind,
new.GetNamespace(),
new.GetName(),
)
}
new.SetName(newName)
}
return nil
}
// ClusterCASecretName returns the name of the CA secret for the given cluster.
func ClusterCASecretName(clusterName string) string {
return fmt.Sprintf("%s-ca", clusterName)
}
| 175 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"errors"
"testing"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestIncrementName(t *testing.T) {
tests := []struct {
name string
oldName string
want string
wantErr string
}{
{
name: "valid",
oldName: "cluster-1",
want: "cluster-2",
wantErr: "",
},
{
name: "invalid format",
oldName: "cluster-1a",
want: "",
wantErr: "invalid format of name",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := clusterapi.IncrementName(tt.oldName)
if tt.wantErr == "" {
g.Expect(err).To(Succeed())
g.Expect(got).To(Equal(tt.want))
} else {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
}
})
}
}
func TestIncrementNameWithFallbackDefault(t *testing.T) {
tests := []struct {
name string
oldName string
defaultName string
want string
}{
{
name: "valid",
oldName: "cluster-1",
defaultName: "default",
want: "cluster-2",
},
{
name: "invalid format",
oldName: "cluster-1a",
defaultName: "default",
want: "default",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got := clusterapi.IncrementNameWithFallbackDefault(tt.oldName, tt.defaultName)
g.Expect(got).To(Equal(tt.want))
})
}
}
func TestObjectName(t *testing.T) {
tests := []struct {
name string
base string
version int
want string
}{
{
name: "cluster-1",
base: "cluster",
version: 1,
want: "cluster-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(clusterapi.ObjectName(tt.base, tt.version)).To(Equal(tt.want))
})
}
}
func TestDefaultObjectName(t *testing.T) {
tests := []struct {
name string
base string
want string
}{
{
name: "cluster-1",
base: "cluster",
want: "cluster-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(clusterapi.DefaultObjectName(tt.base)).To(Equal(tt.want))
})
}
}
func TestKubeadmControlPlaneName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "test cluster",
want: "test-cluster",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.KubeadmControlPlaneName(g.clusterSpec.Cluster)).To(Equal(tt.want))
})
}
}
func TestMachineDeploymentName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "wng 1",
want: "test-cluster-wng-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.MachineDeploymentName(g.clusterSpec.Cluster, *g.workerNodeGroupConfig)).To(Equal(tt.want))
})
}
}
func TestDefaultKubeadmConfigTemplateName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "wng 1",
want: "test-cluster-wng-1-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.DefaultKubeadmConfigTemplateName(g.clusterSpec, *g.workerNodeGroupConfig)).To(Equal(tt.want))
})
}
}
func TestControlPlaneMachineTemplateName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "test cluster",
want: "test-cluster-control-plane-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.ControlPlaneMachineTemplateName(g.clusterSpec.Cluster)).To(Equal(tt.want))
})
}
}
func TestEtcdMachineTemplateName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "test cluster",
want: "test-cluster-etcd-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.EtcdMachineTemplateName(g.clusterSpec.Cluster)).To(Equal(tt.want))
})
}
}
func TestWorkerMachineTemplateName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "wng 1",
want: "test-cluster-wng-1-1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.WorkerMachineTemplateName(g.clusterSpec, *g.workerNodeGroupConfig)).To(Equal(tt.want))
})
}
}
func TestControlPlaneMachineHealthCheckName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "cp",
want: "test-cluster-kcp-unhealthy",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.ControlPlaneMachineHealthCheckName(g.clusterSpec)).To(Equal(tt.want))
})
}
}
func TestWorkerMachineHealthCheckName(t *testing.T) {
tests := []struct {
name string
want string
}{
{
name: "wng 1",
want: "test-cluster-wng-1-worker-unhealthy",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.Expect(clusterapi.WorkerMachineHealthCheckName(g.clusterSpec, *g.workerNodeGroupConfig)).To(Equal(tt.want))
})
}
}
func TestEnsureNewNameIfChangedObjectDoesNotExist(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
originalName := "my-machine-template-1"
mt := dockerMachineTemplate()
mt.Name = originalName
client := test.NewFakeKubeClient()
g.Expect(clusterapi.EnsureNewNameIfChanged(ctx, client, notFoundRetriever, withChangesCompare, mt)).To(Succeed())
g.Expect(mt.Name).To(Equal(originalName))
}
func TestEnsureNewNameIfChangedErrorReadingObject(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
mt := dockerMachineTemplate()
mt.Name = "my-machine-template"
client := test.NewFakeKubeClient()
g.Expect(
clusterapi.EnsureNewNameIfChanged(ctx, client, errorRetriever, withChangesCompare, mt),
).To(
MatchError(ContainSubstring("reading DockerMachineTemplate eksa-system/my-machine-template from API")),
)
}
func TestEnsureNewNameIfChangedErrorIncrementingName(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
mt := dockerMachineTemplate()
mt.Name = "my-machine-template"
client := test.NewFakeKubeClient()
g.Expect(
clusterapi.EnsureNewNameIfChanged(ctx, client, dummyRetriever, withChangesCompare, mt),
).To(
MatchError(ContainSubstring("incrementing name for DockerMachineTemplate eksa-system/my-machine-template")),
)
}
func TestEnsureNewNameIfChangedObjectNeedsNewName(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
mt := dockerMachineTemplate()
mt.Name = "my-machine-template-1"
client := test.NewFakeKubeClient()
g.Expect(clusterapi.EnsureNewNameIfChanged(ctx, client, dummyRetriever, withChangesCompare, mt)).To(Succeed())
g.Expect(mt.Name).To(Equal("my-machine-template-2"))
}
func TestEnsureNewNameIfChangedObjectHasNotChanged(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
originalName := "my-machine-template-1"
mt := dockerMachineTemplate()
mt.Name = originalName
client := test.NewFakeKubeClient()
g.Expect(clusterapi.EnsureNewNameIfChanged(ctx, client, dummyRetriever, noChangesCompare, mt)).To(Succeed())
g.Expect(mt.Name).To(Equal(originalName))
}
func TestClusterCASecretName(t *testing.T) {
g := NewWithT(t)
g.Expect(clusterapi.ClusterCASecretName("my-cluster")).To(Equal("my-cluster-ca"))
}
func TestInitialTemplateNamesForWorkers(t *testing.T) {
tests := []struct {
name string
wantTNames map[string]string
wantKCTNames map[string]string
}{
{
name: "wng 1",
wantTNames: map[string]string{
"wng-1": "test-cluster-wng-1-1",
},
wantKCTNames: map[string]string{
"wng-1": "test-cluster-wng-1-1",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
spec := g.clusterSpec.DeepCopy()
spec.Cluster.Spec.WorkerNodeGroupConfigurations = append(spec.Cluster.Spec.WorkerNodeGroupConfigurations, *g.workerNodeGroupConfig)
workloadTemplateNames, kubeadmConfigTemplateNames := clusterapi.InitialTemplateNamesForWorkers(spec)
g.Expect(workloadTemplateNames).To(Equal(tt.wantTNames))
g.Expect(kubeadmConfigTemplateNames).To(Equal(tt.wantKCTNames))
})
}
}
func dummyRetriever(_ context.Context, _ kubernetes.Client, _, _ string) (*dockerv1.DockerMachineTemplate, error) {
return dockerMachineTemplate(), nil
}
func errorRetriever(_ context.Context, _ kubernetes.Client, _, _ string) (*dockerv1.DockerMachineTemplate, error) {
return nil, errors.New("reading object")
}
func notFoundRetriever(_ context.Context, _ kubernetes.Client, _, _ string) (*dockerv1.DockerMachineTemplate, error) {
return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
}
func noChangesCompare(_, _ *dockerv1.DockerMachineTemplate) bool {
return true
}
func withChangesCompare(_, _ *dockerv1.DockerMachineTemplate) bool {
return false
}
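// Illustrative sketch (not part of the original tests): how a caller wires a retriever and a
// comparator for a concrete template type. The helpers above stand in for real implementations
// that read the stored object from the cluster and compare specs.
func ensureNewNameIfChangedSketch(ctx context.Context, client kubernetes.Client) error {
	desired := dockerMachineTemplate()
	desired.Name = "my-machine-template-1"
	// If the stored template differs from the desired one, the name becomes "my-machine-template-2".
	return clusterapi.EnsureNewNameIfChanged(ctx, client, dummyRetriever, withChangesCompare, desired)
}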
| 382 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// KubeLister is the subset of the Kubernetes client interface needed to list objects.
type KubeLister interface {
List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error
}
// GetProviders lists all installed CAPI providers across all namespaces from the kube-api server.
func GetProviders(ctx context.Context, client KubeLister) ([]clusterctlv1.Provider, error) {
providersList := &clusterctlv1.ProviderList{}
err := client.List(ctx, providersList)
if err != nil {
return nil, err
}
return providersList.Items, nil
}
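// Illustrative sketch (not part of the original source): a typical call site that only needs the
// provider names, e.g. to report which CAPI providers are already installed.
func installedProviderNamesSketch(ctx context.Context, c KubeLister) ([]string, error) {
	providers, err := GetProviders(ctx, c)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(providers))
	for _, p := range providers {
		names = append(names, p.ProviderName)
	}
	return names, nil
}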
| 24 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestGetProvidersEmpty(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
client := fake.NewClientBuilder().
WithRuntimeObjects().
Build()
providers, err := clusterapi.GetProviders(ctx, client)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(providers).To(BeEmpty())
}
func TestGetProvidersMultipleProviders(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
providersWant := []clusterctlv1.Provider{
{
ObjectMeta: metav1.ObjectMeta{
Name: "kubeadm-controlplane",
ResourceVersion: "1",
},
Type: string(clusterctlv1.ControlPlaneProviderType),
ProviderName: "kubeadm",
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "vsphere",
ResourceVersion: "1",
},
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "vsphere",
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "unknown",
ResourceVersion: "1",
},
Type: string(clusterctlv1.InfrastructureProviderType),
ProviderName: "unknown-provider",
},
}
providerObjs := make([]runtime.Object, 0, len(providersWant))
for _, p := range providersWant {
provider := p
providerObjs = append(providerObjs, &provider)
}
client := fake.NewClientBuilder().
WithRuntimeObjects(providerObjs...).
Build()
providers, err := clusterapi.GetProviders(ctx, client)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(providers).To(ConsistOf(providersWant))
}
func TestGetProvidersError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
client := fake.NewClientBuilder().
WithRuntimeObjects().
// using an empty scheme will fail since it doesn't have the clusterctlv1 api
WithScheme(runtime.NewScheme()).
Build()
_, err := clusterapi.GetProviders(ctx, client)
g.Expect(err).To(HaveOccurred())
}
| 89 |
eks-anywhere | aws | Go | package clusterapi
import (
_ "embed"
"fmt"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/templater"
)
//go:embed config/http-proxy.conf
var proxyConfig string
func proxy(cluster *v1alpha1.Cluster) bootstrapv1.ProxyConfiguration {
return bootstrapv1.ProxyConfiguration{
HTTPSProxy: cluster.Spec.ProxyConfiguration.HttpsProxy,
NoProxy: noProxyList(cluster),
}
}
// SetProxyConfigInKubeadmControlPlaneForBottlerocket sets up proxy configuration in kubeadmControlPlane for bottlerocket.
func SetProxyConfigInKubeadmControlPlaneForBottlerocket(kcp *controlplanev1.KubeadmControlPlane, cluster *v1alpha1.Cluster) {
if cluster.Spec.ProxyConfiguration == nil {
return
}
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.Proxy = proxy(cluster)
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.Proxy = proxy(cluster)
}
// SetProxyConfigInKubeadmControlPlaneForUbuntu sets up proxy configuration in kubeadmControlPlane for ubuntu.
func SetProxyConfigInKubeadmControlPlaneForUbuntu(kcp *controlplanev1.KubeadmControlPlane, cluster *v1alpha1.Cluster) error {
if cluster.Spec.ProxyConfiguration == nil {
return nil
}
return addProxyConfigInKubeadmConfigSpecFiles(&kcp.Spec.KubeadmConfigSpec, cluster)
}
// SetProxyConfigInKubeadmConfigTemplateForBottlerocket sets up proxy configuration in kubeadmConfigTemplate for bottlerocket.
func SetProxyConfigInKubeadmConfigTemplateForBottlerocket(kct *bootstrapv1.KubeadmConfigTemplate, cluster *v1alpha1.Cluster) {
if cluster.Spec.ProxyConfiguration == nil {
return
}
kct.Spec.Template.Spec.JoinConfiguration.Proxy = proxy(cluster)
}
// SetProxyConfigInKubeadmConfigTemplateForUbuntu sets up proxy configuration in kubeadmConfigTemplate for ubuntu.
func SetProxyConfigInKubeadmConfigTemplateForUbuntu(kct *bootstrapv1.KubeadmConfigTemplate, cluster *v1alpha1.Cluster) error {
if cluster.Spec.ProxyConfiguration == nil {
return nil
}
return addProxyConfigInKubeadmConfigSpecFiles(&kct.Spec.Template.Spec, cluster)
}
// setProxyConfigInEtcdCluster sets up proxy configuration in etcdadmCluster.
func setProxyConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, cluster *v1alpha1.Cluster) {
if cluster.Spec.ProxyConfiguration == nil {
return
}
etcd.Spec.EtcdadmConfigSpec.Proxy = &etcdbootstrapv1.ProxyConfiguration{
HTTPProxy: cluster.Spec.ProxyConfiguration.HttpProxy,
HTTPSProxy: cluster.Spec.ProxyConfiguration.HttpsProxy,
NoProxy: noProxyList(cluster),
}
}
// NoProxyDefaults returns the default entries that are always appended to the generated no-proxy list.
func NoProxyDefaults() []string {
return []string{
"localhost",
"127.0.0.1",
".svc",
}
}
func noProxyList(cluster *v1alpha1.Cluster) []string {
capacity := len(cluster.Spec.ClusterNetwork.Pods.CidrBlocks) +
len(cluster.Spec.ClusterNetwork.Services.CidrBlocks) +
len(cluster.Spec.ProxyConfiguration.NoProxy) + 4
noProxyList := make([]string, 0, capacity)
noProxyList = append(noProxyList, cluster.Spec.ClusterNetwork.Pods.CidrBlocks...)
noProxyList = append(noProxyList, cluster.Spec.ClusterNetwork.Services.CidrBlocks...)
noProxyList = append(noProxyList, cluster.Spec.ProxyConfiguration.NoProxy...)
// Add no-proxy defaults
noProxyList = append(noProxyList, NoProxyDefaults()...)
noProxyList = append(noProxyList, cluster.Spec.ControlPlaneConfiguration.Endpoint.Host)
return noProxyList
}
func proxyConfigContent(cluster *v1alpha1.Cluster) (string, error) {
val := values{
"httpProxy": cluster.Spec.ProxyConfiguration.HttpProxy,
"httpsProxy": cluster.Spec.ProxyConfiguration.HttpsProxy,
"noProxy": noProxyList(cluster),
}
config, err := templater.Execute(proxyConfig, val)
if err != nil {
return "", fmt.Errorf("building http-proxy.conf file: %v", err)
}
return string(config), nil
}
func proxyConfigFile(cluster *v1alpha1.Cluster) (bootstrapv1.File, error) {
proxyConfig, err := proxyConfigContent(cluster)
if err != nil {
return bootstrapv1.File{}, err
}
return bootstrapv1.File{
Path: "/etc/systemd/system/containerd.service.d/http-proxy.conf",
Owner: "root:root",
Content: proxyConfig,
}, nil
}
func addProxyConfigInKubeadmConfigSpecFiles(kcs *bootstrapv1.KubeadmConfigSpec, cluster *v1alpha1.Cluster) error {
proxyConfigFile, err := proxyConfigFile(cluster)
if err != nil {
return err
}
kcs.Files = append(kcs.Files, proxyConfigFile)
return nil
}
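// Illustrative sketch (not part of the original source): the Ubuntu worker path appends the rendered
// http-proxy.conf drop-in to the template's file list; a provider would call it while building its
// KubeadmConfigTemplate.
func proxyForUbuntuWorkersSketch(kct *bootstrapv1.KubeadmConfigTemplate, cluster *v1alpha1.Cluster) error {
	if err := SetProxyConfigInKubeadmConfigTemplateForUbuntu(kct, cluster); err != nil {
		return err
	}
	// When cluster.Spec.ProxyConfiguration is set, the last entry in Files is now the
	// /etc/systemd/system/containerd.service.d/http-proxy.conf drop-in built by proxyConfigFile.
	return nil
}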
| 138 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
. "github.com/onsi/gomega"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
var proxyTests = []struct {
name string
proxy *v1alpha1.ProxyConfiguration
wantFiles []bootstrapv1.File
wantProxyConfig bootstrapv1.ProxyConfiguration
wantProxyEtcd *etcdbootstrapv1.ProxyConfiguration
}{
{
name: "proxy config nil",
proxy: nil,
wantFiles: []bootstrapv1.File{},
},
{
name: "with proxy, pods cidr, service cidr, cp endpoint",
proxy: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4:8888",
HttpsProxy: "1.2.3.4:8888",
NoProxy: []string{
"1.2.3.4/0",
"1.2.3.5/0",
},
},
wantFiles: []bootstrapv1.File{
{
Path: "/etc/systemd/system/containerd.service.d/http-proxy.conf",
Owner: "root:root",
Content: `[Service]
Environment="HTTP_PROXY=1.2.3.4:8888"
Environment="HTTPS_PROXY=1.2.3.4:8888"
Environment="NO_PROXY=1.2.3.4/5,1.2.3.4/5,1.2.3.4/0,1.2.3.5/0,localhost,127.0.0.1,.svc,1.2.3.4"`,
},
},
wantProxyConfig: bootstrapv1.ProxyConfiguration{
HTTPSProxy: "1.2.3.4:8888",
NoProxy: []string{
"1.2.3.4/5",
"1.2.3.4/5",
"1.2.3.4/0",
"1.2.3.5/0",
"localhost",
"127.0.0.1",
".svc",
"1.2.3.4",
},
},
wantProxyEtcd: &etcdbootstrapv1.ProxyConfiguration{
HTTPProxy: "1.2.3.4:8888",
HTTPSProxy: "1.2.3.4:8888",
NoProxy: []string{
"1.2.3.4/5",
"1.2.3.4/5",
"1.2.3.4/0",
"1.2.3.5/0",
"localhost",
"127.0.0.1",
".svc",
"1.2.3.4",
},
},
},
}
func TestSetProxyConfigInKubeadmControlPlaneBottlerocket(t *testing.T) {
for _, tt := range proxyTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy
clusterapi.SetProxyConfigInKubeadmControlPlaneForBottlerocket(got, g.clusterSpec.Cluster)
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.Proxy = tt.wantProxyConfig
want.Spec.KubeadmConfigSpec.JoinConfiguration.Proxy = tt.wantProxyConfig
g.Expect(got).To(Equal(want))
})
}
}
func TestSetProxyConfigInKubeadmControlPlaneUbuntu(t *testing.T) {
for _, tt := range proxyTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy
g.Expect(clusterapi.SetProxyConfigInKubeadmControlPlaneForUbuntu(got, g.clusterSpec.Cluster)).To(Succeed())
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.Files = tt.wantFiles
g.Expect(got).To(Equal(want))
})
}
}
func TestSetProxyConfigInKubeadmConfigTemplateBottlerocket(t *testing.T) {
for _, tt := range proxyTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy
clusterapi.SetProxyConfigInKubeadmConfigTemplateForBottlerocket(got, g.clusterSpec.Cluster)
want := wantKubeadmConfigTemplate()
want.Spec.Template.Spec.JoinConfiguration.Proxy = tt.wantProxyConfig
g.Expect(got).To(Equal(want))
})
}
}
func TestSetProxyConfigInKubeadmConfigTemplateUbuntu(t *testing.T) {
for _, tt := range proxyTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy
g.Expect(clusterapi.SetProxyConfigInKubeadmConfigTemplateForUbuntu(got, g.clusterSpec.Cluster)).To(Succeed())
want := wantKubeadmConfigTemplate()
want.Spec.Template.Spec.Files = tt.wantFiles
g.Expect(got).To(Equal(want))
})
}
}
func TestEtcdClusterWithProxy(t *testing.T) {
for _, tt := range proxyTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.clusterSpec.Cluster.Spec.ProxyConfiguration = tt.proxy
g.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
got := clusterapi.EtcdadmCluster(g.clusterSpec, g.providerMachineTemplate)
want := wantEtcdCluster()
want.Spec.EtcdadmConfigSpec.Proxy = tt.wantProxyEtcd
g.Expect(got).To(Equal(want))
})
}
}
func TestNoProxyDefaults(t *testing.T) {
g := NewWithT(t)
want := []string{
"localhost",
"127.0.0.1",
".svc",
}
g.Expect(clusterapi.NoProxyDefaults()).To(Equal(want))
}
| 158 |
eks-anywhere | aws | Go | package clusterapi
import (
_ "embed"
"fmt"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/registrymirror"
"github.com/aws/eks-anywhere/pkg/registrymirror/containerd"
"github.com/aws/eks-anywhere/pkg/templater"
)
//go:embed config/containerd_config_append.toml
var containerdConfig string
// SetRegistryMirrorInKubeadmControlPlaneForBottlerocket sets up registry mirror configuration in kubeadmControlPlane for bottlerocket.
func SetRegistryMirrorInKubeadmControlPlaneForBottlerocket(kcp *controlplanev1.KubeadmControlPlane, mirrorConfig *v1alpha1.RegistryMirrorConfiguration) {
if mirrorConfig == nil {
return
}
kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.RegistryMirror = registryMirror(mirrorConfig)
kcp.Spec.KubeadmConfigSpec.JoinConfiguration.RegistryMirror = registryMirror(mirrorConfig)
}
// SetRegistryMirrorInKubeadmControlPlaneForUbuntu sets up registry mirror configuration in kubeadmControlPlane for ubuntu.
func SetRegistryMirrorInKubeadmControlPlaneForUbuntu(kcp *controlplanev1.KubeadmControlPlane, mirrorConfig *v1alpha1.RegistryMirrorConfiguration) error {
if mirrorConfig == nil {
return nil
}
return addRegistryMirrorInKubeadmConfigSpecFiles(&kcp.Spec.KubeadmConfigSpec, mirrorConfig)
}
// SetRegistryMirrorInKubeadmConfigTemplateForBottlerocket sets up registry mirror configuration in kubeadmConfigTemplate for bottlerocket.
func SetRegistryMirrorInKubeadmConfigTemplateForBottlerocket(kct *bootstrapv1.KubeadmConfigTemplate, mirrorConfig *v1alpha1.RegistryMirrorConfiguration) {
if mirrorConfig == nil {
return
}
kct.Spec.Template.Spec.JoinConfiguration.RegistryMirror = registryMirror(mirrorConfig)
}
// SetRegistryMirrorInKubeadmConfigTemplateForUbuntu sets up registry mirror configuration in kubeadmConfigTemplate for ubuntu.
func SetRegistryMirrorInKubeadmConfigTemplateForUbuntu(kct *bootstrapv1.KubeadmConfigTemplate, mirrorConfig *v1alpha1.RegistryMirrorConfiguration) error {
if mirrorConfig == nil {
return nil
}
return addRegistryMirrorInKubeadmConfigSpecFiles(&kct.Spec.Template.Spec, mirrorConfig)
}
// setRegistryMirrorInEtcdCluster sets up registry mirror configuration in etcdadmCluster.
func setRegistryMirrorInEtcdCluster(etcd *etcdv1.EtcdadmCluster, mirrorConfig *v1alpha1.RegistryMirrorConfiguration) {
if mirrorConfig == nil {
return
}
etcd.Spec.EtcdadmConfigSpec.RegistryMirror = &etcdbootstrapv1.RegistryMirrorConfiguration{
Endpoint: containerd.ToAPIEndpoint(registrymirror.FromClusterRegistryMirrorConfiguration(mirrorConfig).CoreEKSAMirror()),
CACert: mirrorConfig.CACertContent,
}
}
func registryMirror(mirrorConfig *v1alpha1.RegistryMirrorConfiguration) bootstrapv1.RegistryMirrorConfiguration {
return bootstrapv1.RegistryMirrorConfiguration{
Endpoint: containerd.ToAPIEndpoint(registrymirror.FromClusterRegistryMirrorConfiguration(mirrorConfig).CoreEKSAMirror()),
CACert: mirrorConfig.CACertContent,
}
}
type values map[string]interface{}
func registryMirrorConfigContent(registryMirror *registrymirror.RegistryMirror) (string, error) {
val := values{
"registryMirrorMap": containerd.ToAPIEndpoints(registryMirror.NamespacedRegistryMap),
"mirrorBase": registryMirror.BaseRegistry,
"registryCACert": registryMirror.CACertContent,
"insecureSkip": registryMirror.InsecureSkipVerify,
}
config, err := templater.Execute(containerdConfig, val)
if err != nil {
return "", fmt.Errorf("building containerd config file: %v", err)
}
return string(config), nil
}
func registryMirrorConfig(registryMirrorConfig *v1alpha1.RegistryMirrorConfiguration) (files []bootstrapv1.File, err error) {
registryMirror := registrymirror.FromClusterRegistryMirrorConfiguration(registryMirrorConfig)
registryConfig, err := registryMirrorConfigContent(registryMirror)
if err != nil {
return nil, err
}
files = []bootstrapv1.File{
{
Path: "/etc/containerd/config_append.toml",
Owner: "root:root",
Content: registryConfig,
},
}
if registryMirrorConfig.CACertContent != "" {
files = append(files, bootstrapv1.File{
Path: fmt.Sprintf("/etc/containerd/certs.d/%s/ca.crt", registryMirror.BaseRegistry),
Owner: "root:root",
Content: registryMirrorConfig.CACertContent,
})
}
return files, nil
}
func addRegistryMirrorInKubeadmConfigSpecFiles(kcs *bootstrapv1.KubeadmConfigSpec, mirrorConfig *v1alpha1.RegistryMirrorConfiguration) error {
containerdFiles, err := registryMirrorConfig(mirrorConfig)
if err != nil {
return fmt.Errorf("setting registry mirror configuration: %v", err)
}
kcs.Files = append(kcs.Files, containerdFiles...)
return nil
}
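// Illustrative sketch (not part of the original source): the two OS flavors are configured differently.
// Bottlerocket consumes the structured RegistryMirror field directly, while Ubuntu writes
// /etc/containerd/config_append.toml (plus the CA cert file when one is configured) via the
// KubeadmConfigSpec files.
func registryMirrorSketch(kcp *controlplanev1.KubeadmControlPlane, kct *bootstrapv1.KubeadmConfigTemplate, mirror *v1alpha1.RegistryMirrorConfiguration) error {
	SetRegistryMirrorInKubeadmControlPlaneForBottlerocket(kcp, mirror)
	return SetRegistryMirrorInKubeadmConfigTemplateForUbuntu(kct, mirror)
}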
| 129 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
etcdbootstrapv1 "github.com/aws/etcdadm-bootstrap-provider/api/v1beta1"
. "github.com/onsi/gomega"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
var registryMirrorTests = []struct {
name string
registryMirrorConfig *v1alpha1.RegistryMirrorConfiguration
wantFiles []bootstrapv1.File
wantRegistryConfig bootstrapv1.RegistryMirrorConfiguration
wantRegistryConfigEtcd *etcdbootstrapv1.RegistryMirrorConfiguration
}{
{
name: "registry config nil",
wantFiles: []bootstrapv1.File{},
wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{},
},
{
name: "with ca cert and namespace mapping for eksa and curated packages",
registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
CACertContent: "xyz",
OCINamespaces: []v1alpha1.OCINamespace{
{
Registry: "public.ecr.aws",
Namespace: "eks-anywhere",
},
{
Registry: "783794618700.dkr.ecr.us-west-2.amazonaws.com",
Namespace: "curated-packages",
},
},
},
wantFiles: []bootstrapv1.File{
{
Path: "/etc/containerd/config_append.toml",
Owner: "root:root",
Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."783794618700.dkr.ecr.*.amazonaws.com"]
endpoint = ["https://1.2.3.4:443/v2/curated-packages"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"]
endpoint = ["https://1.2.3.4:443/v2/eks-anywhere"]
[plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls]
ca_file = "/etc/containerd/certs.d/1.2.3.4:443/ca.crt"`,
},
{
Path: "/etc/containerd/certs.d/1.2.3.4:443/ca.crt",
Owner: "root:root",
Content: "xyz",
},
},
wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4:443/v2/eks-anywhere",
CACert: "xyz",
},
wantRegistryConfigEtcd: &etcdbootstrapv1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4:443/v2/eks-anywhere",
CACert: "xyz",
},
},
{
name: "with insecure skip",
registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
InsecureSkipVerify: true,
},
wantFiles: []bootstrapv1.File{
{
Path: "/etc/containerd/config_append.toml",
Owner: "root:root",
Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"]
endpoint = ["https://1.2.3.4:443"]
[plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls]
insecure_skip_verify = true`,
},
},
wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4:443",
},
wantRegistryConfigEtcd: &etcdbootstrapv1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4:443",
},
},
{
name: "with ca cert and insecure skip",
registryMirrorConfig: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
Port: "443",
CACertContent: "xyz",
InsecureSkipVerify: true,
},
wantFiles: []bootstrapv1.File{
{
Path: "/etc/containerd/config_append.toml",
Owner: "root:root",
Content: `[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."public.ecr.aws"]
endpoint = ["https://1.2.3.4:443"]
[plugins."io.containerd.grpc.v1.cri".registry.configs."1.2.3.4:443".tls]
ca_file = "/etc/containerd/certs.d/1.2.3.4:443/ca.crt"
insecure_skip_verify = true`,
},
{
Path: "/etc/containerd/certs.d/1.2.3.4:443/ca.crt",
Owner: "root:root",
Content: "xyz",
},
},
wantRegistryConfig: bootstrapv1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4:443",
CACert: "xyz",
},
wantRegistryConfigEtcd: &etcdbootstrapv1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4:443",
CACert: "xyz",
},
},
}
func TestSetRegistryMirrorInKubeadmControlPlaneBottleRocket(t *testing.T) {
for _, tt := range registryMirrorTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
clusterapi.SetRegistryMirrorInKubeadmControlPlaneForBottlerocket(got, tt.registryMirrorConfig)
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.ClusterConfiguration.RegistryMirror = tt.wantRegistryConfig
want.Spec.KubeadmConfigSpec.JoinConfiguration.RegistryMirror = tt.wantRegistryConfig
g.Expect(got).To(Equal(want))
})
}
}
func TestSetRegistryMirrorInKubeadmControlPlaneUbuntu(t *testing.T) {
for _, tt := range registryMirrorTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmControlPlane()
g.Expect(clusterapi.SetRegistryMirrorInKubeadmControlPlaneForUbuntu(got, tt.registryMirrorConfig)).To(Succeed())
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.Files = tt.wantFiles
g.Expect(got).To(Equal(want))
})
}
}
func TestSetRegistryMirrorInKubeadmConfigTemplateBottlerocket(t *testing.T) {
for _, tt := range registryMirrorTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
clusterapi.SetRegistryMirrorInKubeadmConfigTemplateForBottlerocket(got, tt.registryMirrorConfig)
want := wantKubeadmConfigTemplate()
want.Spec.Template.Spec.JoinConfiguration.RegistryMirror = tt.wantRegistryConfig
g.Expect(got).To(Equal(want))
})
}
}
func TestSetRegistryMirrorInKubeadmConfigTemplateUbuntu(t *testing.T) {
for _, tt := range registryMirrorTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
got := wantKubeadmConfigTemplate()
g.Expect(clusterapi.SetRegistryMirrorInKubeadmConfigTemplateForUbuntu(got, tt.registryMirrorConfig)).To(Succeed())
want := wantKubeadmConfigTemplate()
want.Spec.Template.Spec.Files = tt.wantFiles
g.Expect(got).To(Equal(want))
})
}
}
func TestEtcdClusterWithRegistryMirror(t *testing.T) {
for _, tt := range registryMirrorTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = tt.registryMirrorConfig
g.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
got := clusterapi.EtcdadmCluster(g.clusterSpec, g.providerMachineTemplate)
want := wantEtcdCluster()
want.Spec.EtcdadmConfigSpec.RegistryMirror = tt.wantRegistryConfigEtcd
g.Expect(got).To(Equal(want))
})
}
}
| 199 |
eks-anywhere | aws | Go | package clusterapi
import (
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
addons "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/templater"
)
// ClusterResourceSet builds a CAPI ClusterResourceSet and the ConfigMaps that hold the resources it distributes to a cluster.
type ClusterResourceSet struct {
resources map[string][]byte
clusterName string
namespace string
}
func NewClusterResourceSet(clusterName string) *ClusterResourceSet {
return &ClusterResourceSet{
clusterName: clusterName,
namespace: "default",
resources: make(map[string][]byte),
}
}
func (c ClusterResourceSet) AddResource(name string, content []byte) {
c.resources[name] = content
}
// ToYaml renders one ConfigMap per resource plus the ClusterResourceSet that references them as a single YAML document, or nil when no resources were added.
func (c ClusterResourceSet) ToYaml() ([]byte, error) {
if len(c.resources) == 0 {
return nil, nil
}
return marshall(append(c.buildResourceConfigMaps(), c.buildSet())...)
}
func (c ClusterResourceSet) buildSet() *addons.ClusterResourceSet {
return &addons.ClusterResourceSet{
TypeMeta: metav1.TypeMeta{
APIVersion: addons.GroupVersion.Identifier(),
Kind: "ClusterResourceSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-crs", c.clusterName),
Labels: map[string]string{
clusterv1.ClusterNameLabel: c.clusterName,
},
Namespace: c.namespace,
},
Spec: addons.ClusterResourceSetSpec{
ClusterSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
clusterv1.ClusterNameLabel: c.clusterName,
},
},
Resources: c.resourceRefs(),
},
}
}
func (c ClusterResourceSet) resourceRefs() []addons.ResourceRef {
refs := make([]addons.ResourceRef, 0, len(c.resources))
for name := range c.resources {
refs = append(refs, addons.ResourceRef{Name: name, Kind: string(addons.ConfigMapClusterResourceSetResourceKind)})
}
return refs
}
func (c ClusterResourceSet) buildResourceConfigMaps() []interface{} {
cms := make([]interface{}, 0, len(c.resources))
for name, content := range c.resources {
cm := corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: c.namespace,
},
Data: map[string]string{
"data": string(content),
},
}
cms = append(cms, cm)
}
return cms
}
func marshall(objects ...interface{}) ([]byte, error) {
bytes := make([][]byte, 0, len(objects))
for _, o := range objects {
b, err := yaml.Marshal(o)
if err != nil {
return nil, fmt.Errorf("marshalling object for cluster resource set: %v", err)
}
bytes = append(bytes, b)
}
return templater.AppendYamlResources(bytes...), nil
}
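// Illustrative sketch (not part of the original source): the typical call sequence is to create the
// set, register named resources, then render the manifest, which contains one ConfigMap per resource
// plus the ClusterResourceSet that references them.
func clusterResourceSetSketch(clusterName string, resources map[string][]byte) ([]byte, error) {
	crs := NewClusterResourceSet(clusterName)
	for name, content := range resources {
		crs.AddResource(name, content)
	}
	// Returns nil when no resources were added.
	return crs.ToYaml()
}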
| 111 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
func TestClusterResourceSetToYaml(t *testing.T) {
tests := []struct {
testName string
filesWithResources map[string]string
wantFileContent string
}{
{
testName: "no resources",
filesWithResources: map[string]string{},
wantFileContent: "",
},
{
testName: "one resource - cluster role",
filesWithResources: map[string]string{
"coredns-role": "testdata/coredns_clusterrole.yaml",
},
wantFileContent: "testdata/expected_crs_clusterrole.yaml",
},
}
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
c := clusterapi.NewClusterResourceSet("cluster-name")
for name, file := range tt.filesWithResources {
content := test.ReadFile(t, file)
c.AddResource(name, []byte(content))
}
got, err := c.ToYaml()
if err != nil {
t.Fatalf("ClusterResourceSet.ToYaml err = %v, want err = nil", err)
}
test.AssertContentToFile(t, string(got), tt.wantFileContent)
})
}
}
| 46 |
eks-anywhere | aws | Go | package clusterapi
import (
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)
var buildContainerdConfigCommands = []string{
"cat /etc/containerd/config_append.toml >> /etc/containerd/config.toml",
}
var restartContainerdCommands = []string{
"sudo systemctl daemon-reload",
"sudo systemctl restart containerd",
}
// CreateContainerdConfigFileInKubeadmControlPlane adds a preKubeadmCommand to create the containerd config file in the kubeadmControlPlane if a registry mirror config exists.
func CreateContainerdConfigFileInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, cluster *v1alpha1.Cluster) {
if cluster.Spec.RegistryMirrorConfiguration != nil {
kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands, buildContainerdConfigCommands...)
}
}
// CreateContainerdConfigFileInKubeadmConfigTemplate adds a preKubeadmCommand to create the containerd config file in the kubeadmConfigTemplate if a registry mirror config exists.
func CreateContainerdConfigFileInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, cluster *v1alpha1.Cluster) {
if cluster.Spec.RegistryMirrorConfiguration != nil {
kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, buildContainerdConfigCommands...)
}
}
// RestartContainerdInKubeadmControlPlane adds a preKubeadmCommand to restart the containerd daemon in the kubeadmControlPlane if a registry mirror or proxy config exists.
func RestartContainerdInKubeadmControlPlane(kcp *controlplanev1.KubeadmControlPlane, cluster *v1alpha1.Cluster) {
if restartContainerdNeeded(cluster) {
kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands = append(kcp.Spec.KubeadmConfigSpec.PreKubeadmCommands, restartContainerdCommands...)
}
}
// RestartContainerdInKubeadmConfigTemplate adds a preKubeadmCommand to restart the containerd daemon in the kubeadmConfigTemplate if a registry mirror or proxy config exists.
func RestartContainerdInKubeadmConfigTemplate(kct *bootstrapv1.KubeadmConfigTemplate, cluster *v1alpha1.Cluster) {
if restartContainerdNeeded(cluster) {
kct.Spec.Template.Spec.PreKubeadmCommands = append(kct.Spec.Template.Spec.PreKubeadmCommands, restartContainerdCommands...)
}
}
func restartContainerdNeeded(cluster *v1alpha1.Cluster) bool {
return cluster.Spec.RegistryMirrorConfiguration != nil || cluster.Spec.ProxyConfiguration != nil
}
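// Illustrative sketch (not part of the original source): the expected call order when building a
// control plane, since the config file must be appended to containerd's config before the daemon is
// restarted for the change to take effect.
func containerdPreKubeadmCommandsSketch(kcp *controlplanev1.KubeadmControlPlane, cluster *v1alpha1.Cluster) {
	CreateContainerdConfigFileInKubeadmControlPlane(kcp, cluster)
	RestartContainerdInKubeadmControlPlane(kcp, cluster)
}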
| 50 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
)
var restartContainerdCommands = []string{
"sudo systemctl daemon-reload",
"sudo systemctl restart containerd",
}
var restartContainerdTests = []struct {
name string
cluster v1alpha1.ClusterSpec
want []string
}{
{
name: "registry mirror and proxy config both exist",
cluster: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: nil,
ProxyConfiguration: nil,
},
want: []string{},
},
{
name: "registry mirror nil",
cluster: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: nil,
ProxyConfiguration: &v1alpha1.ProxyConfiguration{
HttpProxy: "1.2.3.4:8888",
HttpsProxy: "1.2.3.4:8888",
NoProxy: []string{
"1.2.3.4/0",
},
},
},
want: restartContainerdCommands,
},
{
name: "proxy config nil",
cluster: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
},
ProxyConfiguration: nil,
},
want: restartContainerdCommands,
},
}
func TestRestartContainerdInKubeadmControlPlane(t *testing.T) {
for _, tt := range restartContainerdTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.clusterSpec.Cluster.Spec = tt.cluster
got := wantKubeadmControlPlane()
clusterapi.RestartContainerdInKubeadmControlPlane(got, g.clusterSpec.Cluster)
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.PreKubeadmCommands = tt.want
g.Expect(got).To(Equal(want))
})
}
}
func TestRestartContainerdInKubeadmConfigTemplate(t *testing.T) {
for _, tt := range restartContainerdTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.clusterSpec.Cluster.Spec = tt.cluster
got := wantKubeadmConfigTemplate()
clusterapi.RestartContainerdInKubeadmConfigTemplate(got, g.clusterSpec.Cluster)
want := wantKubeadmConfigTemplate()
want.Spec.Template.Spec.PreKubeadmCommands = tt.want
g.Expect(got).To(Equal(want))
})
}
}
var buildContainerdConfigCommands = []string{
"cat /etc/containerd/config_append.toml >> /etc/containerd/config.toml",
}
var createContainerdConfigTests = []struct {
name string
cluster v1alpha1.ClusterSpec
want []string
}{
{
name: "registry mirror exists",
cluster: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: &v1alpha1.RegistryMirrorConfiguration{
Endpoint: "1.2.3.4",
},
},
want: buildContainerdConfigCommands,
},
{
name: "registry mirror nil",
cluster: v1alpha1.ClusterSpec{
RegistryMirrorConfiguration: nil,
},
want: []string{},
},
}
func TestCreateContainerdConfigFileInKubeadmControlPlane(t *testing.T) {
for _, tt := range createContainerdConfigTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.clusterSpec.Cluster.Spec = tt.cluster
got := wantKubeadmControlPlane()
clusterapi.CreateContainerdConfigFileInKubeadmControlPlane(got, g.clusterSpec.Cluster)
want := wantKubeadmControlPlane()
want.Spec.KubeadmConfigSpec.PreKubeadmCommands = tt.want
g.Expect(got).To(Equal(want))
})
}
}
func TestCreateContainerdConfigFileInKubeadmConfigTemplate(t *testing.T) {
for _, tt := range createContainerdConfigTests {
t.Run(tt.name, func(t *testing.T) {
g := newApiBuilerTest(t)
g.clusterSpec.Cluster.Spec = tt.cluster
got := wantKubeadmConfigTemplate()
clusterapi.CreateContainerdConfigFileInKubeadmConfigTemplate(got, g.clusterSpec.Cluster)
want := wantKubeadmConfigTemplate()
want.Spec.Template.Spec.PreKubeadmCommands = tt.want
g.Expect(got).To(Equal(want))
})
}
}
| 139 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
"fmt"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
)
// Upgrader upgrades the CAPI components installed in a management cluster.
type Upgrader struct {
*clients
}
// NewUpgrader builds an Upgrader wired with the given CAPI and kubectl clients.
func NewUpgrader(capiClient CAPIClient, kubectlClient KubectlClient) *Upgrader {
return &Upgrader{
clients: &clients{
capiClient: capiClient,
kubectlClient: kubectlClient,
},
}
}
func (u *Upgrader) Upgrade(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error) {
logger.V(1).Info("Checking for CAPI upgrades")
if !newSpec.Cluster.IsSelfManaged() {
logger.V(1).Info("Skipping CAPI upgrades, not a self-managed cluster")
return nil, nil
}
capiChangeDiff := capiChangeDiff(currentSpec, newSpec, provider)
if capiChangeDiff == nil {
logger.V(1).Info("Nothing to upgrade for CAPI")
return nil, nil
}
logger.V(1).Info("Starting CAPI upgrades")
if err := u.capiClient.Upgrade(ctx, managementCluster, provider, newSpec, capiChangeDiff); err != nil {
return nil, fmt.Errorf("failed upgrading ClusterAPI from bundles %d to bundles %d: %v", currentSpec.Bundles.Spec.Number, newSpec.Bundles.Spec.Number, err)
}
return capiChangeDiff.toChangeDiff(), nil
}
// CAPIChangeDiff captures the pending version changes for each CAPI component between two cluster specs.
type CAPIChangeDiff struct {
CertManager *types.ComponentChangeDiff
Core *types.ComponentChangeDiff
ControlPlane *types.ComponentChangeDiff
BootstrapProviders []types.ComponentChangeDiff
InfrastructureProvider *types.ComponentChangeDiff
}
func (c *CAPIChangeDiff) toChangeDiff() *types.ChangeDiff {
if c == nil {
logger.V(1).Info("Nothing to upgrade for CAPI")
return nil
}
r := make([]*types.ComponentChangeDiff, 0, 4+len(c.BootstrapProviders))
r = append(r, c.CertManager, c.Core, c.ControlPlane, c.InfrastructureProvider)
for _, bootstrapChangeDiff := range c.BootstrapProviders {
b := bootstrapChangeDiff
r = append(r, &b)
}
return types.NewChangeDiff(r...)
}
// CapiChangeDiff returns the aggregated change diff for the CAPI components between the current and new cluster specs, or nil when nothing changed.
func CapiChangeDiff(currentSpec, newSpec *cluster.Spec, provider providers.Provider) *types.ChangeDiff {
return capiChangeDiff(currentSpec, newSpec, provider).toChangeDiff()
}
func capiChangeDiff(currentSpec, newSpec *cluster.Spec, provider providers.Provider) *CAPIChangeDiff {
changeDiff := &CAPIChangeDiff{}
componentChanged := false
if currentSpec.VersionsBundle.CertManager.Version != newSpec.VersionsBundle.CertManager.Version {
changeDiff.CertManager = &types.ComponentChangeDiff{
ComponentName: "cert-manager",
NewVersion: newSpec.VersionsBundle.CertManager.Version,
OldVersion: currentSpec.VersionsBundle.CertManager.Version,
}
logger.V(1).Info("Cert-manager change diff", "oldVersion", changeDiff.CertManager.OldVersion, "newVersion", changeDiff.CertManager.NewVersion)
componentChanged = true
}
if currentSpec.VersionsBundle.ClusterAPI.Version != newSpec.VersionsBundle.ClusterAPI.Version {
changeDiff.Core = &types.ComponentChangeDiff{
ComponentName: "cluster-api",
NewVersion: newSpec.VersionsBundle.ClusterAPI.Version,
OldVersion: currentSpec.VersionsBundle.ClusterAPI.Version,
}
logger.V(1).Info("CAPI Core change diff", "oldVersion", changeDiff.Core.OldVersion, "newVersion", changeDiff.Core.NewVersion)
componentChanged = true
}
if currentSpec.VersionsBundle.ControlPlane.Version != newSpec.VersionsBundle.ControlPlane.Version {
changeDiff.ControlPlane = &types.ComponentChangeDiff{
ComponentName: "kubeadm",
NewVersion: newSpec.VersionsBundle.ControlPlane.Version,
OldVersion: currentSpec.VersionsBundle.ControlPlane.Version,
}
logger.V(1).Info("CAPI Control Plane provider change diff", "oldVersion", changeDiff.ControlPlane.OldVersion, "newVersion", changeDiff.ControlPlane.NewVersion)
componentChanged = true
}
if currentSpec.VersionsBundle.Bootstrap.Version != newSpec.VersionsBundle.Bootstrap.Version {
componentChangeDiff := types.ComponentChangeDiff{
ComponentName: "kubeadm",
NewVersion: newSpec.VersionsBundle.Bootstrap.Version,
OldVersion: currentSpec.VersionsBundle.Bootstrap.Version,
}
changeDiff.BootstrapProviders = append(changeDiff.BootstrapProviders, componentChangeDiff)
logger.V(1).Info("CAPI Kubeadm Bootstrap Provider change diff", "oldVersion", componentChangeDiff.OldVersion, "newVersion", componentChangeDiff.NewVersion)
componentChanged = true
}
if currentSpec.VersionsBundle.ExternalEtcdBootstrap.Version != newSpec.VersionsBundle.ExternalEtcdBootstrap.Version {
componentChangeDiff := types.ComponentChangeDiff{
ComponentName: "etcdadm-bootstrap",
NewVersion: newSpec.VersionsBundle.ExternalEtcdBootstrap.Version,
OldVersion: currentSpec.VersionsBundle.ExternalEtcdBootstrap.Version,
}
changeDiff.BootstrapProviders = append(changeDiff.BootstrapProviders, componentChangeDiff)
logger.V(1).Info("CAPI Etcdadm Bootstrap Provider change diff", "oldVersion", componentChangeDiff.OldVersion, "newVersion", componentChangeDiff.NewVersion)
componentChanged = true
}
if currentSpec.VersionsBundle.ExternalEtcdController.Version != newSpec.VersionsBundle.ExternalEtcdController.Version {
componentChangeDiff := types.ComponentChangeDiff{
ComponentName: "etcdadm-controller",
NewVersion: newSpec.VersionsBundle.ExternalEtcdController.Version,
OldVersion: currentSpec.VersionsBundle.ExternalEtcdController.Version,
}
changeDiff.BootstrapProviders = append(changeDiff.BootstrapProviders, componentChangeDiff)
logger.V(1).Info("CAPI Etcdadm Controller Provider change diff", "oldVersion", componentChangeDiff.OldVersion, "newVersion", componentChangeDiff.NewVersion)
componentChanged = true
}
if providerChangeDiff := provider.ChangeDiff(currentSpec, newSpec); providerChangeDiff != nil {
changeDiff.InfrastructureProvider = providerChangeDiff
logger.V(1).Info("CAPI Infrastrcture Provider change diff", "provider", providerChangeDiff.ComponentName, "oldVersion", providerChangeDiff.OldVersion, "newVersion", providerChangeDiff.NewVersion)
componentChanged = true
}
if !componentChanged {
return nil
}
return changeDiff
}
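// Illustrative sketch (not part of the original source): surfacing the pending CAPI component
// upgrades without performing them, e.g. for a dry-run style report.
func reportCAPIChangeDiffSketch(currentSpec, newSpec *cluster.Spec, provider providers.Provider) {
	diff := CapiChangeDiff(currentSpec, newSpec, provider)
	if diff == nil {
		logger.V(1).Info("CAPI components are already up to date")
		return
	}
	for _, component := range diff.ComponentReports {
		logger.V(1).Info("Pending CAPI component upgrade",
			"component", component.ComponentName, "from", component.OldVersion, "to", component.NewVersion)
	}
}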
| 153 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"errors"
"testing"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clusterapi/mocks"
providerMocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/types"
)
type upgraderTest struct {
*WithT
ctx context.Context
capiClient *mocks.MockCAPIClient
kubectlClient *mocks.MockKubectlClient
upgrader *clusterapi.Upgrader
currentSpec *cluster.Spec
newSpec *cluster.Spec
cluster *types.Cluster
provider *providerMocks.MockProvider
providerChangeDiff *types.ComponentChangeDiff
}
func newUpgraderTest(t *testing.T) *upgraderTest {
ctrl := gomock.NewController(t)
capiClient := mocks.NewMockCAPIClient(ctrl)
kubectlClient := mocks.NewMockKubectlClient(ctrl)
currentSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Bundles.Spec.Number = 1
s.VersionsBundle.CertManager.Version = "v0.1.0"
s.VersionsBundle.ClusterAPI.Version = "v0.1.0"
s.VersionsBundle.ControlPlane.Version = "v0.1.0"
s.VersionsBundle.Bootstrap.Version = "v0.1.0"
s.VersionsBundle.ExternalEtcdBootstrap.Version = "v0.1.0"
s.VersionsBundle.ExternalEtcdController.Version = "v0.1.0"
})
return &upgraderTest{
WithT: NewWithT(t),
ctx: context.Background(),
capiClient: capiClient,
kubectlClient: kubectlClient,
upgrader: clusterapi.NewUpgrader(capiClient, kubectlClient),
currentSpec: currentSpec,
newSpec: currentSpec.DeepCopy(),
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "k.kubeconfig",
},
provider: providerMocks.NewMockProvider(ctrl),
providerChangeDiff: &types.ComponentChangeDiff{
ComponentName: "vsphere",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
}
}
func TestUpgraderUpgradeNoSelfManaged(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.Cluster.SetManagedBy("management-cluster")
tt.Expect(tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestUpgraderUpgradeNoChanges(t *testing.T) {
tt := newUpgraderTest(t)
tt.provider.EXPECT().ChangeDiff(tt.currentSpec, tt.newSpec).Return(nil)
tt.Expect(tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestUpgraderUpgradeProviderChanges(t *testing.T) {
tt := newUpgraderTest(t)
changeDiff := &clusterapi.CAPIChangeDiff{
InfrastructureProvider: tt.providerChangeDiff,
}
wantDiff := &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{*tt.providerChangeDiff},
}
tt.provider.EXPECT().ChangeDiff(tt.currentSpec, tt.newSpec).Return(tt.providerChangeDiff)
tt.capiClient.EXPECT().Upgrade(tt.ctx, tt.cluster, tt.provider, tt.newSpec, changeDiff)
tt.Expect(tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestUpgraderUpgradeCoreChanges(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.ClusterAPI.Version = "v0.2.0"
changeDiff := &clusterapi.CAPIChangeDiff{
Core: &types.ComponentChangeDiff{
ComponentName: "cluster-api",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
}
wantDiff := &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{*changeDiff.Core},
}
tt.provider.EXPECT().ChangeDiff(tt.currentSpec, tt.newSpec).Return(nil)
tt.capiClient.EXPECT().Upgrade(tt.ctx, tt.cluster, tt.provider, tt.newSpec, changeDiff)
tt.Expect(tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestUpgraderUpgradeEverythingChangesStackedEtcd(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.VersionsBundle.CertManager.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ClusterAPI.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ControlPlane.Version = "v0.2.0"
tt.newSpec.VersionsBundle.Bootstrap.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ExternalEtcdBootstrap.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ExternalEtcdController.Version = "v0.2.0"
changeDiff := &clusterapi.CAPIChangeDiff{
CertManager: &types.ComponentChangeDiff{
ComponentName: "cert-manager",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
Core: &types.ComponentChangeDiff{
ComponentName: "cluster-api",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
ControlPlane: &types.ComponentChangeDiff{
ComponentName: "kubeadm",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
BootstrapProviders: []types.ComponentChangeDiff{
{
ComponentName: "kubeadm",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
{
ComponentName: "etcdadm-bootstrap",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
{
ComponentName: "etcdadm-controller",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
},
InfrastructureProvider: tt.providerChangeDiff,
}
components := []types.ComponentChangeDiff{*changeDiff.CertManager, *changeDiff.Core, *changeDiff.ControlPlane, *tt.providerChangeDiff}
bootstrapProviders := append(components, changeDiff.BootstrapProviders...)
wantDiff := &types.ChangeDiff{
ComponentReports: bootstrapProviders,
}
tt.provider.EXPECT().ChangeDiff(tt.currentSpec, tt.newSpec).Return(tt.providerChangeDiff)
tt.capiClient.EXPECT().Upgrade(tt.ctx, tt.cluster, tt.provider, tt.newSpec, changeDiff)
tt.Expect(tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestUpgraderUpgradeEverythingChangesExternalEtcd(t *testing.T) {
tt := newUpgraderTest(t)
tt.newSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{}
tt.newSpec.VersionsBundle.CertManager.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ClusterAPI.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ControlPlane.Version = "v0.2.0"
tt.newSpec.VersionsBundle.Bootstrap.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ExternalEtcdBootstrap.Version = "v0.2.0"
tt.newSpec.VersionsBundle.ExternalEtcdController.Version = "v0.2.0"
changeDiff := &clusterapi.CAPIChangeDiff{
CertManager: &types.ComponentChangeDiff{
ComponentName: "cert-manager",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
Core: &types.ComponentChangeDiff{
ComponentName: "cluster-api",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
ControlPlane: &types.ComponentChangeDiff{
ComponentName: "kubeadm",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
BootstrapProviders: []types.ComponentChangeDiff{
{
ComponentName: "kubeadm",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
{
ComponentName: "etcdadm-bootstrap",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
{
ComponentName: "etcdadm-controller",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
},
InfrastructureProvider: tt.providerChangeDiff,
}
wantDiff := &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
*changeDiff.CertManager, *changeDiff.Core, *changeDiff.ControlPlane, *tt.providerChangeDiff,
changeDiff.BootstrapProviders[0],
changeDiff.BootstrapProviders[1],
changeDiff.BootstrapProviders[2],
},
}
tt.provider.EXPECT().ChangeDiff(tt.currentSpec, tt.newSpec).Return(tt.providerChangeDiff)
tt.capiClient.EXPECT().Upgrade(tt.ctx, tt.cluster, tt.provider, tt.newSpec, changeDiff)
tt.Expect(tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestUpgraderUpgradeCAPIClientError(t *testing.T) {
tt := newUpgraderTest(t)
changeDiff := &clusterapi.CAPIChangeDiff{
InfrastructureProvider: tt.providerChangeDiff,
}
tt.provider.EXPECT().ChangeDiff(tt.currentSpec, tt.newSpec).Return(tt.providerChangeDiff)
tt.capiClient.EXPECT().Upgrade(tt.ctx, tt.cluster, tt.provider, tt.newSpec, changeDiff).Return(errors.New("error from client"))
_, err := tt.upgrader.Upgrade(tt.ctx, tt.cluster, tt.provider, tt.currentSpec, tt.newSpec)
tt.Expect(err).NotTo(BeNil())
}
| 247 |
eks-anywhere | aws | Go | package clusterapi
import (
"context"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
)
// Workers represents the provider-specific CAPI spec for an eks-a cluster's workers.
type Workers[M Object[M]] struct {
Groups []WorkerGroup[M]
}
// WorkerObjects returns a list of API objects for the concrete provider-specific collection of worker groups.
func (w *Workers[M]) WorkerObjects() []kubernetes.Object {
objs := make([]kubernetes.Object, 0, len(w.Groups)*3)
for _, g := range w.Groups {
objs = append(objs, g.Objects()...)
}
return objs
}
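// Example (hypothetical sketch, not part of the original API surface): collecting
// every worker object for a Docker-backed cluster so they can be applied or
// serialized in one pass. group1 and group2 are assumed to be pre-built
// WorkerGroup values for *dockerv1.DockerMachineTemplate.
//
//	workers := &Workers[*dockerv1.DockerMachineTemplate]{
//		Groups: []WorkerGroup[*dockerv1.DockerMachineTemplate]{group1, group2},
//	}
//	objs := workers.WorkerObjects()
//	// objs now holds the KubeadmConfigTemplate, MachineDeployment and
//	// DockerMachineTemplate of each group, ready to be applied with a kubernetes.Client.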
// UpdateImmutableObjectNames checks if any immutable objects have changed by comparing the new definition
// with the current state of the cluster. If they have, it generates a new name for them by incrementing the
// monotonic number suffix at the end of the name.
func (w *Workers[M]) UpdateImmutableObjectNames(
ctx context.Context,
client kubernetes.Client,
machineTemplateRetriever ObjectRetriever[M],
machineTemplateComparator ObjectComparator[M],
) error {
for _, g := range w.Groups {
if err := g.UpdateImmutableObjectNames(ctx, client, machineTemplateRetriever, machineTemplateComparator); err != nil {
return err
}
}
return nil
}
// WorkerGroup represents the provider-specific CAPI spec for an eks-a worker group.
type WorkerGroup[M Object[M]] struct {
KubeadmConfigTemplate *kubeadmv1.KubeadmConfigTemplate
MachineDeployment *clusterv1.MachineDeployment
ProviderMachineTemplate M
}
// Objects returns a list of API objects for the provider-specific worker group.
func (g *WorkerGroup[M]) Objects() []kubernetes.Object {
return []kubernetes.Object{
g.KubeadmConfigTemplate,
g.MachineDeployment,
g.ProviderMachineTemplate,
}
}
// UpdateImmutableObjectNames checks if any immutable objects have changed by comparing the new definition
// with the current state of the cluster. If they have, it generates a new name for them by incrementing the
// monotonic number suffix at the end of the name.
// This process is applied to the provider machine template and the KubeadmConfigTemplate.
// The KubeadmConfigTemplate is not immutable at the API level, but we treat it as such for consistency.
func (g *WorkerGroup[M]) UpdateImmutableObjectNames(
ctx context.Context,
client kubernetes.Client,
machineTemplateRetriever ObjectRetriever[M],
machineTemplateComparator ObjectComparator[M],
) error {
currentMachineDeployment := &clusterv1.MachineDeployment{}
err := client.Get(ctx, g.MachineDeployment.Name, g.MachineDeployment.Namespace, currentMachineDeployment)
if apierrors.IsNotFound(err) {
// MachineDeployment doesn't exist, this is a new cluster so machine templates should use their default name
return nil
}
if err != nil {
return errors.Wrap(err, "reading current machine deployment from API")
}
g.ProviderMachineTemplate.SetName(currentMachineDeployment.Spec.Template.Spec.InfrastructureRef.Name)
if err = EnsureNewNameIfChanged(ctx, client, machineTemplateRetriever, machineTemplateComparator, g.ProviderMachineTemplate); err != nil {
return err
}
g.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name = g.ProviderMachineTemplate.GetName()
g.KubeadmConfigTemplate.SetName(currentMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name)
if err = EnsureNewNameIfChanged(ctx, client, GetKubeadmConfigTemplate, KubeadmConfigTemplateEqual, g.KubeadmConfigTemplate); err != nil {
return err
}
g.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name = g.KubeadmConfigTemplate.Name
return nil
}
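// Illustrative sketch (hypothetical names, mirroring this package's tests): if the
// cluster currently references machine template "mt-1" and the newly generated
// template differs from it, EnsureNewNameIfChanged bumps the numeric suffix and the
// MachineDeployment is re-pointed to the new name.
//
//	// before reconciliation: InfrastructureRef.Name == "mt-1"
//	// after UpdateImmutableObjectNames with a changed template:
//	//   g.ProviderMachineTemplate.GetName() == "mt-2"
//	//   g.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name == "mt-2"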
// DeepCopy generates a new WorkerGroup copying the contents of the receiver.
func (g *WorkerGroup[M]) DeepCopy() *WorkerGroup[M] {
return &WorkerGroup[M]{
MachineDeployment: g.MachineDeployment.DeepCopy(),
KubeadmConfigTemplate: g.KubeadmConfigTemplate.DeepCopy(),
ProviderMachineTemplate: g.ProviderMachineTemplate.DeepCopy(),
}
}
// GetKubeadmConfigTemplate retrieves a KubeadmConfigTemplate using a client.
// Implements ObjectRetriever.
func GetKubeadmConfigTemplate(ctx context.Context, client kubernetes.Client, name, namespace string) (*kubeadmv1.KubeadmConfigTemplate, error) {
k := &kubeadmv1.KubeadmConfigTemplate{}
if err := client.Get(ctx, name, namespace, k); err != nil {
return nil, err
}
return k, nil
}
// KubeadmConfigTemplateEqual returns true only if applying the new version of a KubeadmConfigTemplate
// to the cluster would not involve changes with respect to the old one.
// Implements ObjectComparator.
func KubeadmConfigTemplateEqual(new, old *kubeadmv1.KubeadmConfigTemplate) bool {
// DeepDerivative treats empty map (length == 0) as unset field. We need to manually compare certain fields
// such as taints, so that setting it to empty will trigger machine recreate
return kubeadmConfigTemplateTaintsEqual(new, old) && kubeadmConfigTemplateExtraArgsEqual(new, old) &&
equality.Semantic.DeepDerivative(new.Spec, old.Spec)
}
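// Example of the manual-comparison caveat (hypothetical sketch): DeepDerivative alone
// would treat clearing all taints as a no-op because an empty slice reads as "unset",
// so the explicit taints check is what makes that case report a difference.
//
//	// old has JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{{Key: "key"}}
//	// new has JoinConfiguration.NodeRegistration.Taints = []corev1.Taint{}
//	KubeadmConfigTemplateEqual(new, old) // false, which forces a new template name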
func kubeadmConfigTemplateTaintsEqual(new, old *kubeadmv1.KubeadmConfigTemplate) bool {
return new.Spec.Template.Spec.JoinConfiguration == nil ||
old.Spec.Template.Spec.JoinConfiguration == nil ||
anywherev1.TaintsSliceEqual(
new.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints,
old.Spec.Template.Spec.JoinConfiguration.NodeRegistration.Taints,
)
}
func kubeadmConfigTemplateExtraArgsEqual(new, old *kubeadmv1.KubeadmConfigTemplate) bool {
return new.Spec.Template.Spec.JoinConfiguration == nil ||
old.Spec.Template.Spec.JoinConfiguration == nil ||
anywherev1.MapEqual(
new.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs,
old.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs,
)
}
| 148 |
eks-anywhere | aws | Go | package clusterapi_test
import (
"context"
"testing"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
)
type (
dockerGroup = clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]
dockerWorkers = clusterapi.Workers[*dockerv1.DockerMachineTemplate]
)
func TestWorkersUpdateImmutableObjectNamesError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group1 := dockerGroup{
MachineDeployment: machineDeployment(),
}
group2 := dockerGroup{
MachineDeployment: machineDeployment(),
}
workers := &dockerWorkers{
Groups: []dockerGroup{group1, group2},
}
client := test.NewFakeKubeClientAlwaysError()
g.Expect(
workers.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare),
).NotTo(Succeed())
}
func TestWorkersUpdateImmutableObjectNamesSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group1 := dockerGroup{
MachineDeployment: machineDeployment(),
}
group2 := dockerGroup{
MachineDeployment: machineDeployment(),
}
workers := &dockerWorkers{
Groups: []dockerGroup{group1, group2},
}
client := test.NewFakeKubeClient()
g.Expect(
workers.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare),
).To(Succeed())
}
func TestWorkerObjects(t *testing.T) {
g := NewWithT(t)
group1 := dockerGroup{
MachineDeployment: machineDeployment(),
}
group2 := dockerGroup{
MachineDeployment: machineDeployment(),
}
workers := &dockerWorkers{
Groups: []dockerGroup{group1, group2},
}
objects := workers.WorkerObjects()
wantObjects := []kubernetes.Object{
group1.KubeadmConfigTemplate,
group1.MachineDeployment,
group1.ProviderMachineTemplate,
group2.KubeadmConfigTemplate,
group2.MachineDeployment,
group2.ProviderMachineTemplate,
}
g.Expect(objects).To(ConsistOf(wantObjects))
}
func TestWorkerGroupUpdateImmutableObjectNamesNoMachineDeployment(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group := &dockerGroup{
MachineDeployment: machineDeployment(),
}
client := test.NewFakeKubeClient()
g.Expect(group.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare)).To(Succeed())
}
func TestWorkerGroupUpdateImmutableObjectNamesErrorReadingMachineDeployment(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group := &dockerGroup{
MachineDeployment: machineDeployment(),
}
client := test.NewFakeKubeClientAlwaysError()
g.Expect(
group.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare),
).To(MatchError(ContainSubstring("reading current machine deployment from API")))
}
func TestWorkerGroupUpdateImmutableObjectNamesErrorUpdatingMachineTemplateName(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group := &dockerGroup{
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: dockerMachineTemplate(),
KubeadmConfigTemplate: kubeadmConfigTemplate(),
}
group.MachineDeployment.Spec.Template.Spec.InfrastructureRef = *objectReference(group.ProviderMachineTemplate)
group.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef = objectReference(group.KubeadmConfigTemplate)
client := test.NewFakeKubeClient(group.MachineDeployment)
g.Expect(
group.UpdateImmutableObjectNames(ctx, client, errorRetriever, noChangesCompare),
).To(MatchError(ContainSubstring("reading DockerMachineTemplate eksa-system/mt-1 from API")))
}
func TestWorkerGroupUpdateImmutableObjectNamesErrorUpdatingKubeadmConfigTemplate(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group := &dockerGroup{
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: dockerMachineTemplate(),
KubeadmConfigTemplate: kubeadmConfigTemplate(),
}
group.MachineDeployment.Spec.Template.Spec.InfrastructureRef = *objectReference(group.ProviderMachineTemplate)
group.KubeadmConfigTemplate.Name = "invalid-name"
group.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef = objectReference(group.KubeadmConfigTemplate)
client := test.NewFakeKubeClient(group.MachineDeployment, group.KubeadmConfigTemplate, group.ProviderMachineTemplate)
group.KubeadmConfigTemplate.Spec.Template.Spec.PostKubeadmCommands = []string{"ls"}
g.Expect(
group.UpdateImmutableObjectNames(ctx, client, dummyRetriever, noChangesCompare),
).To(MatchError(ContainSubstring("incrementing name for KubeadmConfigTemplate eksa-system/invalid-name")))
}
func TestWorkerGroupUpdateImmutableObjectNamesSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
group := &dockerGroup{
MachineDeployment: machineDeployment(),
ProviderMachineTemplate: dockerMachineTemplate(),
KubeadmConfigTemplate: kubeadmConfigTemplate(),
}
group.MachineDeployment.Spec.Template.Spec.InfrastructureRef = *objectReference(group.ProviderMachineTemplate)
group.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef = objectReference(group.KubeadmConfigTemplate)
client := test.NewFakeKubeClient(group.MachineDeployment, group.KubeadmConfigTemplate, group.ProviderMachineTemplate)
group.KubeadmConfigTemplate.Spec.Template.Spec.PostKubeadmCommands = []string{"ls"}
g.Expect(
group.UpdateImmutableObjectNames(ctx, client, dummyRetriever, withChangesCompare),
).To(Succeed())
g.Expect(group.KubeadmConfigTemplate.Name).To(Equal("template-2"))
g.Expect(group.ProviderMachineTemplate.Name).To(Equal("mt-2"))
g.Expect(group.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal(group.KubeadmConfigTemplate.Name))
g.Expect(group.MachineDeployment.Spec.Template.Spec.InfrastructureRef.Name).To(Equal(group.ProviderMachineTemplate.Name))
}
func TestGetKubeadmConfigTemplateSuccess(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
k := kubeadmConfigTemplate()
client := test.NewFakeKubeClient(k)
g.Expect(clusterapi.GetKubeadmConfigTemplate(ctx, client, k.Name, k.Namespace)).To(Equal(k))
}
func TestGetKubeadmConfigTemplateError(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
k := kubeadmConfigTemplate()
client := test.NewFakeKubeClientAlwaysError()
_, err := clusterapi.GetKubeadmConfigTemplate(ctx, client, k.Name, k.Namespace)
g.Expect(err).To(HaveOccurred())
}
func TestKubeadmConfigTemplateEqual(t *testing.T) {
tests := []struct {
name string
new, old *kubeadmv1.KubeadmConfigTemplate
want bool
}{
{
name: "equal",
new: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
old: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
want: true,
},
{
name: "diff taints",
new: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
old: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
want: false,
},
{
name: "diff labels",
new: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"cgroup-driver": "cgroupfs",
"eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%",
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
old: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
KubeletExtraArgs: map[string]string{
"tls-cipher-suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"cgroup-driver": "cgroupfs",
"eviction-hard": "nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%",
"node-labels": "foo-bar",
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
want: false,
},
{
name: "new JoinConfiguration nil",
new: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
old: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
want: true,
},
{
name: "old JoinConfiguration nil",
new: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
old: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
want: false,
},
{
name: "diff spec",
new: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
},
old: &kubeadmv1.KubeadmConfigTemplate{
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
JoinConfiguration: &kubeadmv1.JoinConfiguration{
NodeRegistration: kubeadmv1.NodeRegistrationOptions{
Taints: []corev1.Taint{
{
Key: "key",
},
},
},
},
Files: []kubeadmv1.File{
{
Owner: "you",
},
},
},
},
},
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(clusterapi.KubeadmConfigTemplateEqual(tt.new, tt.old)).To(Equal(tt.want))
})
}
}
func TestWorkerGroupDeepCopy(t *testing.T) {
g := NewWithT(t)
group := &dockerGroup{
MachineDeployment: machineDeployment(),
KubeadmConfigTemplate: kubeadmConfigTemplate(),
ProviderMachineTemplate: dockerMachineTemplate(),
}
g.Expect(group.DeepCopy()).To(Equal(group))
}
func kubeadmConfigTemplate() *kubeadmv1.KubeadmConfigTemplate {
return &kubeadmv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "template-1",
Namespace: constants.EksaSystemNamespace,
},
Spec: kubeadmv1.KubeadmConfigTemplateSpec{
Template: kubeadmv1.KubeadmConfigTemplateResource{
Spec: kubeadmv1.KubeadmConfigSpec{
Files: []kubeadmv1.File{
{
Owner: "me",
},
},
},
},
},
}
}
func machineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "MachineDeployment",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "deployment",
Namespace: constants.EksaSystemNamespace,
},
}
}
func objectReference(obj client.Object) *corev1.ObjectReference {
return &corev1.ObjectReference{
Kind: obj.GetObjectKind().GroupVersionKind().Kind,
APIVersion: obj.GetObjectKind().GroupVersionKind().Version,
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
}
}
| 531 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/clusterapi/manager.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
clusterapi "github.com/aws/eks-anywhere/pkg/clusterapi"
providers "github.com/aws/eks-anywhere/pkg/providers"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
)
// MockCAPIClient is a mock of CAPIClient interface.
type MockCAPIClient struct {
ctrl *gomock.Controller
recorder *MockCAPIClientMockRecorder
}
// MockCAPIClientMockRecorder is the mock recorder for MockCAPIClient.
type MockCAPIClientMockRecorder struct {
mock *MockCAPIClient
}
// NewMockCAPIClient creates a new mock instance.
func NewMockCAPIClient(ctrl *gomock.Controller) *MockCAPIClient {
mock := &MockCAPIClient{ctrl: ctrl}
mock.recorder = &MockCAPIClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCAPIClient) EXPECT() *MockCAPIClientMockRecorder {
return m.recorder
}
// InstallEtcdadmProviders mocks base method.
func (m *MockCAPIClient) InstallEtcdadmProviders(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider, installProviders []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallEtcdadmProviders", ctx, clusterSpec, cluster, provider, installProviders)
ret0, _ := ret[0].(error)
return ret0
}
// InstallEtcdadmProviders indicates an expected call of InstallEtcdadmProviders.
func (mr *MockCAPIClientMockRecorder) InstallEtcdadmProviders(ctx, clusterSpec, cluster, provider, installProviders interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallEtcdadmProviders", reflect.TypeOf((*MockCAPIClient)(nil).InstallEtcdadmProviders), ctx, clusterSpec, cluster, provider, installProviders)
}
// Upgrade mocks base method.
func (m *MockCAPIClient) Upgrade(ctx context.Context, managementCluster *types.Cluster, provider providers.Provider, newSpec *cluster.Spec, changeDiff *clusterapi.CAPIChangeDiff) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", ctx, managementCluster, provider, newSpec, changeDiff)
ret0, _ := ret[0].(error)
return ret0
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockCAPIClientMockRecorder) Upgrade(ctx, managementCluster, provider, newSpec, changeDiff interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockCAPIClient)(nil).Upgrade), ctx, managementCluster, provider, newSpec, changeDiff)
}
// MockKubectlClient is a mock of KubectlClient interface.
type MockKubectlClient struct {
ctrl *gomock.Controller
recorder *MockKubectlClientMockRecorder
}
// MockKubectlClientMockRecorder is the mock recorder for MockKubectlClient.
type MockKubectlClientMockRecorder struct {
mock *MockKubectlClient
}
// NewMockKubectlClient creates a new mock instance.
func NewMockKubectlClient(ctrl *gomock.Controller) *MockKubectlClient {
mock := &MockKubectlClient{ctrl: ctrl}
mock.recorder = &MockKubectlClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubectlClient) EXPECT() *MockKubectlClientMockRecorder {
return m.recorder
}
// CheckProviderExists mocks base method.
func (m *MockKubectlClient) CheckProviderExists(ctx context.Context, kubeconfigFile, name, namespace string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckProviderExists", ctx, kubeconfigFile, name, namespace)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CheckProviderExists indicates an expected call of CheckProviderExists.
func (mr *MockKubectlClientMockRecorder) CheckProviderExists(ctx, kubeconfigFile, name, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckProviderExists", reflect.TypeOf((*MockKubectlClient)(nil).CheckProviderExists), ctx, kubeconfigFile, name, namespace)
}
| 106 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/clusterapi/resourceset_manager.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
types "github.com/aws/eks-anywhere/pkg/types"
gomock "github.com/golang/mock/gomock"
v1 "k8s.io/api/core/v1"
v1beta1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
)
// MockClient is a mock of Client interface.
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientMockRecorder
}
// MockClientMockRecorder is the mock recorder for MockClient.
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClient creates a new mock instance.
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// ApplyKubeSpecFromBytes mocks base method.
func (m *MockClient) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", ctx, cluster, data)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockClientMockRecorder) ApplyKubeSpecFromBytes(ctx, cluster, data interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockClient)(nil).ApplyKubeSpecFromBytes), ctx, cluster, data)
}
// GetClusterResourceSet mocks base method.
func (m *MockClient) GetClusterResourceSet(ctx context.Context, kubeconfigFile, name, namespace string) (*v1beta1.ClusterResourceSet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClusterResourceSet", ctx, kubeconfigFile, name, namespace)
ret0, _ := ret[0].(*v1beta1.ClusterResourceSet)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetClusterResourceSet indicates an expected call of GetClusterResourceSet.
func (mr *MockClientMockRecorder) GetClusterResourceSet(ctx, kubeconfigFile, name, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterResourceSet", reflect.TypeOf((*MockClient)(nil).GetClusterResourceSet), ctx, kubeconfigFile, name, namespace)
}
// GetConfigMap mocks base method.
func (m *MockClient) GetConfigMap(ctx context.Context, kubeconfigFile, name, namespace string) (*v1.ConfigMap, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetConfigMap", ctx, kubeconfigFile, name, namespace)
ret0, _ := ret[0].(*v1.ConfigMap)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetConfigMap indicates an expected call of GetConfigMap.
func (mr *MockClientMockRecorder) GetConfigMap(ctx, kubeconfigFile, name, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigMap", reflect.TypeOf((*MockClient)(nil).GetConfigMap), ctx, kubeconfigFile, name, namespace)
}
// GetSecretFromNamespace mocks base method.
func (m *MockClient) GetSecretFromNamespace(ctx context.Context, kubeconfigFile, name, namespace string) (*v1.Secret, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSecretFromNamespace", ctx, kubeconfigFile, name, namespace)
ret0, _ := ret[0].(*v1.Secret)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSecretFromNamespace indicates an expected call of GetSecretFromNamespace.
func (mr *MockClientMockRecorder) GetSecretFromNamespace(ctx, kubeconfigFile, name, namespace interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretFromNamespace", reflect.TypeOf((*MockClient)(nil).GetSecretFromNamespace), ctx, kubeconfigFile, name, namespace)
}
| 98 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/clusterapi/fetch.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
gomock "github.com/golang/mock/gomock"
)
// MockKubeClient is a mock of KubeClient interface.
type MockKubeClient struct {
ctrl *gomock.Controller
recorder *MockKubeClientMockRecorder
}
// MockKubeClientMockRecorder is the mock recorder for MockKubeClient.
type MockKubeClientMockRecorder struct {
mock *MockKubeClient
}
// NewMockKubeClient creates a new mock instance.
func NewMockKubeClient(ctrl *gomock.Controller) *MockKubeClient {
mock := &MockKubeClient{ctrl: ctrl}
mock.recorder = &MockKubeClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubeClient) EXPECT() *MockKubeClientMockRecorder {
return m.recorder
}
// Get mocks base method.
func (m *MockKubeClient) Get(ctx context.Context, name, namespace string, obj kubernetes.Object) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", ctx, name, namespace, obj)
ret0, _ := ret[0].(error)
return ret0
}
// Get indicates an expected call of Get.
func (mr *MockKubeClientMockRecorder) Get(ctx, name, namespace, obj interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeClient)(nil).Get), ctx, name, namespace, obj)
}
| 51 |
eks-anywhere | aws | Go | package yaml
import (
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/go-logr/logr"
"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
// NewControlPlaneParserAndBuilder builds a Parser and a Builder for a particular provider ControlPlane.
// It registers the basic shared mappings plus another two for the provider cluster and machine template.
// For ControlPlanes that need to include more objects, wrap the provider builder and implement BuildFromParsed.
// Any extra mappings will need to be registered manually in the Parser.
func NewControlPlaneParserAndBuilder[C clusterapi.Object[C], M clusterapi.Object[M]](logger logr.Logger, clusterMapping yamlutil.Mapping[C], machineTemplateMapping yamlutil.Mapping[M]) (*yamlutil.Parser, *ControlPlaneBuilder[C, M], error) {
parser := yamlutil.NewParser(logger)
if err := RegisterControlPlaneMappings(parser); err != nil {
return nil, nil, errors.Wrap(err, "building capi control plane parser")
}
err := parser.RegisterMappings(
clusterMapping.ToAPIObjectMapping(),
machineTemplateMapping.ToAPIObjectMapping(),
)
if err != nil {
return nil, nil, errors.Wrap(err, "registering provider control plane mappings")
}
return parser, NewControlPlaneBuilder[C, M](), nil
}
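// Usage sketch (hypothetical, using the Docker provider types that this repo's tests
// rely on; error handling elided):
//
//	parser, builder, err := NewControlPlaneParserAndBuilder(
//		logger,
//		yamlutil.NewMapping("DockerCluster", func() *dockerv1.DockerCluster { return &dockerv1.DockerCluster{} }),
//		yamlutil.NewMapping("DockerMachineTemplate", func() *dockerv1.DockerMachineTemplate { return &dockerv1.DockerMachineTemplate{} }),
//	)
//	// parser.Parse(capiYaml, builder) fills builder.ControlPlane with the CAPI Cluster,
//	// provider cluster, KubeadmControlPlane, machine templates and, if present, the etcdadm cluster.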
// RegisterControlPlaneMappings records the basic mappings for CAPI cluster, kubeadmcontrolplane
// and etcdadm cluster in a Parser.
func RegisterControlPlaneMappings(parser *yamlutil.Parser) error {
err := parser.RegisterMappings(
yamlutil.NewMapping(
"Cluster", func() yamlutil.APIObject {
return &clusterv1.Cluster{}
},
),
yamlutil.NewMapping(
"KubeadmControlPlane", func() yamlutil.APIObject {
return &controlplanev1.KubeadmControlPlane{}
},
),
yamlutil.NewMapping(
"EtcdadmCluster", func() yamlutil.APIObject {
return &etcdv1.EtcdadmCluster{}
},
),
)
if err != nil {
return errors.Wrap(err, "registering base control plane mappings")
}
return nil
}
// ControlPlaneBuilder implements yamlutil.Builder.
// It's a wrapper around ControlPlane to provide yaml parsing functionality.
type ControlPlaneBuilder[C clusterapi.Object[C], M clusterapi.Object[M]] struct {
ControlPlane *clusterapi.ControlPlane[C, M]
}
// NewControlPlaneBuilder builds a ControlPlaneBuilder.
func NewControlPlaneBuilder[C clusterapi.Object[C], M clusterapi.Object[M]]() *ControlPlaneBuilder[C, M] {
return &ControlPlaneBuilder[C, M]{
ControlPlane: new(clusterapi.ControlPlane[C, M]),
}
}
// BuildFromParsed reads parsed objects in ObjectLookup and sets them in the ControlPlane.
func (cp *ControlPlaneBuilder[C, M]) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
ProcessControlPlaneObjects(cp.ControlPlane, lookup)
return nil
}
// ProcessControlPlaneObjects finds all necessary objects in the parsed objects and sets them in the ControlPlane.
func ProcessControlPlaneObjects[C clusterapi.Object[C], M clusterapi.Object[M]](cp *clusterapi.ControlPlane[C, M], lookup yamlutil.ObjectLookup) {
ProcessCluster(cp, lookup)
if cp.Cluster == nil {
return
}
ProcessProviderCluster(cp, lookup)
ProcessKubeadmControlPlane(cp, lookup)
ProcessEtcdCluster(cp, lookup)
}
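// Note (added commentary): the CAPI Cluster must be located first because every other
// object (provider cluster, kubeadm control plane, etcdadm cluster) is resolved through
// references held by the Cluster spec; without it there is nothing else to look up.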
// ProcessCluster finds the CAPI cluster in the parsed objects and sets it in ControlPlane.
func ProcessCluster[C clusterapi.Object[C], M clusterapi.Object[M]](cp *clusterapi.ControlPlane[C, M], lookup yamlutil.ObjectLookup) {
for _, obj := range lookup {
if obj.GetObjectKind().GroupVersionKind().Kind == "Cluster" {
cp.Cluster = obj.(*clusterv1.Cluster)
return
}
}
}
// ProcessProviderCluster finds the provider cluster in the parsed objects and sets it in ControlPlane.
func ProcessProviderCluster[C clusterapi.Object[C], M clusterapi.Object[M]](cp *clusterapi.ControlPlane[C, M], lookup yamlutil.ObjectLookup) {
providerCluster := lookup.GetFromRef(*cp.Cluster.Spec.InfrastructureRef)
if providerCluster == nil {
return
}
cp.ProviderCluster = providerCluster.(C)
}
// ProcessKubeadmControlPlane finds the CAPI kubeadm control plane and the kubeadm control plane machine template
// in the parsed objects and sets them in the ControlPlane.
func ProcessKubeadmControlPlane[C clusterapi.Object[C], M clusterapi.Object[M]](cp *clusterapi.ControlPlane[C, M], lookup yamlutil.ObjectLookup) {
kcp := lookup.GetFromRef(*cp.Cluster.Spec.ControlPlaneRef)
if kcp == nil {
return
}
cp.KubeadmControlPlane = kcp.(*controlplanev1.KubeadmControlPlane)
machineTemplate := lookup.GetFromRef(cp.KubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef)
if machineTemplate == nil {
return
}
cp.ControlPlaneMachineTemplate = machineTemplate.(M)
}
// ProcessEtcdCluster finds the CAPI etcdadm cluster (for unstacked clusters) in the parsed objects and sets it in ControlPlane.
func ProcessEtcdCluster[C clusterapi.Object[C], M clusterapi.Object[M]](cp *clusterapi.ControlPlane[C, M], lookup yamlutil.ObjectLookup) {
if cp.Cluster.Spec.ManagedExternalEtcdRef == nil {
return
}
etcdCluster := lookup.GetFromRef(*cp.Cluster.Spec.ManagedExternalEtcdRef)
if etcdCluster == nil {
return
}
cp.EtcdCluster = etcdCluster.(*etcdv1.EtcdadmCluster)
etcdMachineTemplate := lookup.GetFromRef(cp.EtcdCluster.Spec.InfrastructureTemplate)
if etcdMachineTemplate == nil {
return
}
cp.EtcdMachineTemplate = etcdMachineTemplate.(M)
}
| 151 |
eks-anywhere | aws | Go | package yaml_test
import (
"testing"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
type dockerControlPlane = clusterapi.ControlPlane[*dockerv1.DockerCluster, *dockerv1.DockerMachineTemplate]
func TestNewControlPlaneParserAndBuilderSuccessParsing(t *testing.T) {
g := NewWithT(t)
parser, builder, err := yaml.NewControlPlaneParserAndBuilder(
test.NewNullLogger(),
yamlutil.NewMapping(
"DockerCluster",
func() *dockerv1.DockerCluster {
return &dockerv1.DockerCluster{}
},
),
yamlutil.NewMapping(
"DockerMachineTemplate",
func() *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{}
},
),
)
g.Expect(err).To(Succeed())
yaml := []byte(`apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: cluster
namespace: eksa-system
spec:
controlPlaneRef:
apiVersion: controlplane.clusterapi.k8s/v1beta1
kind: KubeadmControlPlane
name: cp
namespace: eksa-system
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerCluster
name: cluster
namespace: eksa-system
`)
g.Expect(parser.Parse(yaml, builder)).To(Succeed())
g.Expect(builder.ControlPlane.Cluster).To(Equal(capiCluster()))
}
func TestNewControlPlaneParserAndBuilderErrorFromMappings(t *testing.T) {
g := NewWithT(t)
_, _, err := yaml.NewControlPlaneParserAndBuilder(
test.NewNullLogger(),
yamlutil.NewMapping(
"Cluster",
func() *dockerv1.DockerCluster {
return &dockerv1.DockerCluster{}
},
),
yamlutil.NewMapping(
"DockerMachineTemplate",
func() *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{}
},
),
)
g.Expect(err).To(MatchError(ContainSubstring("registering provider control plane mappings")))
}
func TestRegisterControlPlaneMappingsError(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(parser.RegisterMapping("Cluster", func() yamlutil.APIObject { return nil })).To(Succeed())
g.Expect(yaml.RegisterControlPlaneMappings(parser)).To(MatchError(ContainSubstring("registering base control plane mappings")))
}
func TestRegisterControlPlaneSuccess(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(yaml.RegisterControlPlaneMappings(parser)).To(Succeed())
}
func TestProcessControlPlaneObjectsNoCluster(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
lookup := yamlutil.NewObjectLookupBuilder().Add(dockerCluster()).Build()
yaml.ProcessControlPlaneObjects(cp, lookup)
g.Expect(cp.Cluster).To(BeNil())
}
func TestProcessControlPlaneObjects(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cluster := capiCluster()
providerCluster := dockerCluster()
kubeadmCP := kubeadmControlPlane()
cpMachineTemplate := dockerMachineTemplate("cp-mt")
etcdCluster := etcdCluster()
cluster.Spec.ManagedExternalEtcdRef = objectReference(etcdCluster)
etcdMachineTemplate := dockerMachineTemplate("etcd-mt")
etcdCluster.Spec.InfrastructureTemplate = *objectReference(etcdMachineTemplate)
lookup := yamlutil.NewObjectLookupBuilder().Add(
cluster,
providerCluster,
kubeadmCP,
cpMachineTemplate,
etcdCluster,
etcdMachineTemplate,
).Build()
yaml.ProcessControlPlaneObjects(cp, lookup)
g.Expect(cp.Cluster).To(Equal(cluster))
g.Expect(cp.ProviderCluster).To(Equal(providerCluster))
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmCP))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(cpMachineTemplate))
g.Expect(cp.EtcdCluster).To(Equal(etcdCluster))
g.Expect(cp.EtcdMachineTemplate).To(Equal(etcdMachineTemplate))
}
func TestProcessClusterNoCluster(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
lookup := yamlutil.NewObjectLookupBuilder().Add(dockerCluster()).Build()
yaml.ProcessCluster(cp, lookup)
g.Expect(cp.Cluster).To(BeNil())
}
func TestProcessClusterWithCluster(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cluster := capiCluster()
lookup := yamlutil.NewObjectLookupBuilder().Add(cluster).Build()
yaml.ProcessCluster(cp, lookup)
g.Expect(cp.Cluster).To(Equal(cluster))
}
func TestProcessProviderClusterWithNoCluster(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cluster := capiCluster()
cp.Cluster = cluster
lookup := yamlutil.NewObjectLookupBuilder().Add(cluster).Build()
yaml.ProcessProviderCluster(cp, lookup)
g.Expect(cp.ProviderCluster).To(BeNil())
}
func TestProcessProviderClusterWithCluster(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cluster := capiCluster()
cp.Cluster = cluster
providerCluster := dockerCluster()
lookup := yamlutil.NewObjectLookupBuilder().Add(providerCluster).Build()
yaml.ProcessProviderCluster(cp, lookup)
g.Expect(cp.ProviderCluster).To(Equal(providerCluster))
}
func TestProcessKubeadmControlPlaneNoControlPlane(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
lookup := yamlutil.NewObjectLookupBuilder().Add(dockerCluster()).Build()
yaml.ProcessKubeadmControlPlane(cp, lookup)
g.Expect(cp.KubeadmControlPlane).To(BeNil())
}
func TestProcessKubeadmControlPlaneNoMachineTemplate(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
kubeadmControlPlane := kubeadmControlPlane()
lookup := yamlutil.NewObjectLookupBuilder().Add(
dockerCluster(),
kubeadmControlPlane,
).Build()
yaml.ProcessKubeadmControlPlane(cp, lookup)
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane))
g.Expect(cp.ControlPlaneMachineTemplate).To(BeNil())
}
func TestProcessKubeadmControlPlaneWithControlPlaneAndMachineTemplate(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
kubeadmControlPlane := kubeadmControlPlane()
mt := dockerMachineTemplate(kubeadmControlPlane.Spec.MachineTemplate.InfrastructureRef.Name)
lookup := yamlutil.NewObjectLookupBuilder().Add(
dockerCluster(),
kubeadmControlPlane,
mt,
).Build()
yaml.ProcessKubeadmControlPlane(cp, lookup)
g.Expect(cp.KubeadmControlPlane).To(Equal(kubeadmControlPlane))
g.Expect(cp.ControlPlaneMachineTemplate).To(Equal(mt))
}
func TestProcessEtcdClusterStackedEtcd(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
lookup := yamlutil.NewObjectLookupBuilder().Add(dockerCluster()).Build()
yaml.ProcessEtcdCluster(cp, lookup)
g.Expect(cp.EtcdCluster).To(BeNil())
}
func TestProcessEtcdClusterNoCluster(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
cp.Cluster.Spec.ManagedExternalEtcdRef = objectReference(etcdCluster())
lookup := yamlutil.NewObjectLookupBuilder().Add(dockerCluster()).Build()
yaml.ProcessEtcdCluster(cp, lookup)
g.Expect(cp.EtcdCluster).To(BeNil())
}
func TestProcessEtcdClusterNoMachineTemplate(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
etcdCluster := etcdCluster()
cp.Cluster.Spec.ManagedExternalEtcdRef = objectReference(etcdCluster)
lookup := yamlutil.NewObjectLookupBuilder().Add(
dockerCluster(),
etcdCluster,
).Build()
yaml.ProcessEtcdCluster(cp, lookup)
g.Expect(cp.EtcdCluster).To(Equal(etcdCluster))
g.Expect(cp.EtcdMachineTemplate).To(BeNil())
}
func TestProcessEtcdClusterWithClusterAndMachineTemplate(t *testing.T) {
g := NewWithT(t)
cp := &dockerControlPlane{}
cp.Cluster = capiCluster()
etcdCluster := etcdCluster()
mt := dockerMachineTemplate("etcd-mt")
etcdCluster.Spec.InfrastructureTemplate = *objectReference(mt)
cp.Cluster.Spec.ManagedExternalEtcdRef = objectReference(etcdCluster)
lookup := yamlutil.NewObjectLookupBuilder().Add(
dockerCluster(),
etcdCluster,
mt,
).Build()
yaml.ProcessEtcdCluster(cp, lookup)
g.Expect(cp.EtcdCluster).To(Equal(etcdCluster))
g.Expect(cp.EtcdMachineTemplate).To(Equal(mt))
}
func capiCluster() *clusterv1.Cluster {
return &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cluster",
Namespace: constants.EksaSystemNamespace,
},
Spec: clusterv1.ClusterSpec{
InfrastructureRef: &corev1.ObjectReference{
Name: "cluster",
Namespace: constants.EksaSystemNamespace,
Kind: "DockerCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ControlPlaneRef: &corev1.ObjectReference{
Name: "cp",
Namespace: constants.EksaSystemNamespace,
Kind: "KubeadmControlPlane",
APIVersion: "controlplane.clusterapi.k8s/v1beta1",
},
},
}
}
func kubeadmControlPlane() *controlplanev1.KubeadmControlPlane {
return &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmControlPlane",
APIVersion: "controlplane.clusterapi.k8s/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cp",
Namespace: constants.EksaSystemNamespace,
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
InfrastructureRef: corev1.ObjectReference{
Name: "cp-mt",
Namespace: constants.EksaSystemNamespace,
Kind: "DockerMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
},
},
}
}
func dockerCluster() *dockerv1.DockerCluster {
return &dockerv1.DockerCluster{
TypeMeta: metav1.TypeMeta{
Kind: "DockerCluster",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cluster",
Namespace: constants.EksaSystemNamespace,
},
}
}
func dockerMachineTemplate(name string) *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "DockerMachineTemplate",
APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.EksaSystemNamespace,
},
}
}
func etcdCluster() *etcdv1.EtcdadmCluster {
return &etcdv1.EtcdadmCluster{
TypeMeta: metav1.TypeMeta{
Kind: "EtcdCluster",
APIVersion: "etcd.clusterapi.k8s",
},
ObjectMeta: metav1.ObjectMeta{
Name: "etcd",
Namespace: constants.EksaSystemNamespace,
},
}
}
func objectReference(obj client.Object) *corev1.ObjectReference {
return &corev1.ObjectReference{
Kind: obj.GetObjectKind().GroupVersionKind().Kind,
APIVersion: obj.GetObjectKind().GroupVersionKind().GroupVersion().String(),
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
}
}
| 375 |
eks-anywhere | aws | Go | package yaml
import (
"github.com/go-logr/logr"
"github.com/pkg/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
const machineDeploymentKind = "MachineDeployment"
// WorkersBuilder implements yamlutil.Builder.
// It's a wrapper around Workers to provide yaml parsing functionality.
type WorkersBuilder[M clusterapi.Object[M]] struct {
Workers *clusterapi.Workers[M]
}
// NewWorkersBuilder builds a WorkersBuilder.
func NewWorkersBuilder[M clusterapi.Object[M]]() *WorkersBuilder[M] {
return &WorkersBuilder[M]{
Workers: new(clusterapi.Workers[M]),
}
}
// BuildFromParsed reads parsed objects in ObjectLookup and sets them in the Workers.
func (cp *WorkersBuilder[M]) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
ProcessWorkerObjects(cp.Workers, lookup)
return nil
}
// NewWorkersParserAndBuilder builds a Parser and a Builder for a particular provider's Workers.
// It registers the basic shared mappings plus another one for the provider machine template.
// For worker specs that need to include more objects, wrap the provider builder and
// implement BuildFromParsed.
// Any extra mappings will need to be registered manually in the Parser.
func NewWorkersParserAndBuilder[M clusterapi.Object[M]](
logger logr.Logger,
machineTemplateMapping yamlutil.Mapping[M],
) (*yamlutil.Parser, *WorkersBuilder[M], error) {
parser := yamlutil.NewParser(logger)
if err := RegisterWorkerMappings(parser); err != nil {
return nil, nil, errors.Wrap(err, "building capi worker parser")
}
err := parser.RegisterMappings(
machineTemplateMapping.ToAPIObjectMapping(),
)
if err != nil {
return nil, nil, errors.Wrap(err, "registering provider worker mappings")
}
return parser, NewWorkersBuilder[M](), nil
}
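// Usage sketch (hypothetical, mirroring the Docker-based tests in this repo; error
// handling elided):
//
//	parser, builder, err := NewWorkersParserAndBuilder(
//		logger,
//		yamlutil.NewMapping("DockerMachineTemplate", func() *dockerv1.DockerMachineTemplate { return &dockerv1.DockerMachineTemplate{} }),
//	)
//	// parser.Parse(workersYaml, builder) fills builder.Workers with one WorkerGroup per
//	// MachineDeployment found in the yaml, each wired to its KubeadmConfigTemplate and machine template.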
// RegisterWorkerMappings records the basic mappings for CAPI MachineDeployment
// and KubeadmConfigTemplate in a Parser.
func RegisterWorkerMappings(parser *yamlutil.Parser) error {
err := parser.RegisterMappings(
yamlutil.NewMapping(
machineDeploymentKind, func() yamlutil.APIObject {
return &clusterv1.MachineDeployment{}
},
),
yamlutil.NewMapping(
"KubeadmConfigTemplate", func() yamlutil.APIObject {
return &kubeadmv1.KubeadmConfigTemplate{}
},
),
)
if err != nil {
return errors.Wrap(err, "registering base worker mappings")
}
return nil
}
// ProcessWorkerObjects finds all necessary objects in the parsed objects and sets them in Workers.
func ProcessWorkerObjects[M clusterapi.Object[M]](w *clusterapi.Workers[M], lookup yamlutil.ObjectLookup) {
for _, obj := range lookup {
if obj.GetObjectKind().GroupVersionKind().Kind == machineDeploymentKind {
g := new(clusterapi.WorkerGroup[M])
g.MachineDeployment = obj.(*clusterv1.MachineDeployment)
ProcessWorkerGroupObjects(g, lookup)
w.Groups = append(w.Groups, *g)
}
}
}
// ProcessWorkerGroupObjects looks in the parsed objects for the KubeadmConfigTemplate and
// the provider machine template referenced in the MachineDeployment and sets them in the WorkerGroup.
// The MachineDeployment must already be set in the WorkerGroup.
func ProcessWorkerGroupObjects[M clusterapi.Object[M]](g *clusterapi.WorkerGroup[M], lookup yamlutil.ObjectLookup) {
kubeadmConfigTemplate := lookup.GetFromRef(*g.MachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef)
if kubeadmConfigTemplate != nil {
g.KubeadmConfigTemplate = kubeadmConfigTemplate.(*kubeadmv1.KubeadmConfigTemplate)
}
machineTemplate := lookup.GetFromRef(g.MachineDeployment.Spec.Template.Spec.InfrastructureRef)
if machineTemplate != nil {
g.ProviderMachineTemplate = machineTemplate.(M)
}
}
| 106 |
eks-anywhere | aws | Go | package yaml_test
import (
"testing"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
dockerv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clusterapi/yaml"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
type (
dockerWorkers = clusterapi.Workers[*dockerv1.DockerMachineTemplate]
dockerWorkerGroup = clusterapi.WorkerGroup[*dockerv1.DockerMachineTemplate]
)
func TestNewWorkersParserAndBuilderSuccessParsing(t *testing.T) {
g := NewWithT(t)
parser, builder, err := yaml.NewWorkersParserAndBuilder(
test.NewNullLogger(),
yamlutil.NewMapping(
"DockerMachineTemplate",
func() *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{}
},
),
)
g.Expect(err).To(BeNil())
yaml := []byte(`apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: workers-1
namespace: eksa-system
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: workers-1
namespace: eksa-system
spec:
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: workers-1
namespace: eksa-system
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
name: workers-1
namespace: eksa-system
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
name: workers-1
namespace: eksa-system
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: workers-2
namespace: eksa-system
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: workers-2
namespace: eksa-system
spec:
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: workers-2
namespace: eksa-system
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
name: workers-2
namespace: eksa-system
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
name: workers-2
namespace: eksa-system
`)
g.Expect(parser.Parse(yaml, builder)).To(Succeed())
g.Expect(builder.Workers.Groups).To(
ConsistOf(*group("workers-1"), *group("workers-2")),
)
}
func TestNewWorkersParserAndBuilderErrorFromMappings(t *testing.T) {
g := NewWithT(t)
_, _, err := yaml.NewWorkersParserAndBuilder(
test.NewNullLogger(),
yamlutil.NewMapping(
"MachineDeployment",
func() *dockerv1.DockerMachineTemplate {
return &dockerv1.DockerMachineTemplate{}
},
),
)
g.Expect(err).To(MatchError(ContainSubstring("registering provider worker mappings")))
}
func TestRegisterWorkerMappingsError(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(
parser.RegisterMapping("KubeadmConfigTemplate", func() yamlutil.APIObject { return nil }),
).To(Succeed())
g.Expect(
yaml.RegisterWorkerMappings(parser),
).To(
MatchError(ContainSubstring("registering base worker mappings")),
)
}
func TestRegisterWorkerMappingsSuccess(t *testing.T) {
g := NewWithT(t)
parser := yamlutil.NewParser(test.NewNullLogger())
g.Expect(
yaml.RegisterWorkerMappings(parser),
).To(Succeed())
}
func TestProcessWorkerObjects(t *testing.T) {
g := NewWithT(t)
wantGroup1 := group("workers-1")
wantGroup2 := group("workers-2")
lookup := yamlutil.NewObjectLookupBuilder().Add(
wantGroup1.MachineDeployment,
wantGroup1.KubeadmConfigTemplate,
wantGroup1.ProviderMachineTemplate,
wantGroup2.MachineDeployment,
wantGroup2.KubeadmConfigTemplate,
wantGroup2.ProviderMachineTemplate,
).Build()
w := &dockerWorkers{}
yaml.ProcessWorkerObjects(w, lookup)
g.Expect(w.Groups).To(ConsistOf(*wantGroup1, *wantGroup2))
}
func TestProcessWorkerGroupObjectsNoKubeadmConfigTemplate(t *testing.T) {
g := NewWithT(t)
group := group("workers-1")
group.KubeadmConfigTemplate = nil
lookup := yamlutil.NewObjectLookupBuilder().Add(group.MachineDeployment).Build()
yaml.ProcessWorkerGroupObjects(group, lookup)
g.Expect(group.KubeadmConfigTemplate).To(BeNil())
}
func TestProcessWorkerGroupObjectsNoMachineTemplate(t *testing.T) {
g := NewWithT(t)
group := group("workers-1")
group.ProviderMachineTemplate = nil
lookup := yamlutil.NewObjectLookupBuilder().Add(group.MachineDeployment).Build()
yaml.ProcessWorkerGroupObjects(group, lookup)
g.Expect(group.ProviderMachineTemplate).To(BeNil())
}
func TestProcessWorkerGroupObjects(t *testing.T) {
g := NewWithT(t)
group := group("workers-1")
kct := group.KubeadmConfigTemplate
mt := group.ProviderMachineTemplate
group.KubeadmConfigTemplate = nil
group.ProviderMachineTemplate = nil
lookup := yamlutil.NewObjectLookupBuilder().Add(
group.MachineDeployment,
kct,
mt,
).Build()
yaml.ProcessWorkerGroupObjects(group, lookup)
g.Expect(group.KubeadmConfigTemplate).To(Equal(kct))
g.Expect(group.ProviderMachineTemplate).To(Equal(mt))
}
func kubeadmConfigTemplate() *kubeadmv1.KubeadmConfigTemplate {
return &kubeadmv1.KubeadmConfigTemplate{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmConfigTemplate",
APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "template-1",
Namespace: constants.EksaSystemNamespace,
},
}
}
func machineDeployment() *clusterv1.MachineDeployment {
return &clusterv1.MachineDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "MachineDeployment",
APIVersion: "cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "deployment",
Namespace: constants.EksaSystemNamespace,
},
}
}
func group(baseName string) *dockerWorkerGroup {
md := machineDeployment()
md.Name = baseName
kct := kubeadmConfigTemplate()
kct.Name = baseName
dmt := dockerMachineTemplate(baseName)
md.Spec.Template.Spec.Bootstrap.ConfigRef = objectReference(kct)
md.Spec.Template.Spec.InfrastructureRef = *objectReference(dmt)
return &dockerWorkerGroup{
MachineDeployment: md,
KubeadmConfigTemplate: kct,
ProviderMachineTemplate: dmt,
}
}
| 244 |
eks-anywhere | aws | Go | package clustermanager
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"github.com/aws/eks-anywhere/pkg/types"
)
// KubernetesClient allows interacting with the k8s api server.
type KubernetesClient interface {
Apply(ctx context.Context, kubeconfigPath string, obj runtime.Object) error
ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error
ApplyKubeSpecFromBytesForce(ctx context.Context, cluster *types.Cluster, data []byte) error
WaitForDeployment(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error
UpdateAnnotationInNamespace(ctx context.Context, resourceType, objectName string, annotations map[string]string, cluster *types.Cluster, namespace string) error
RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error
}
type clusterManagerClient struct {
ClusterClient
}
func newClient(clusterClient ClusterClient) *clusterManagerClient {
return &clusterManagerClient{ClusterClient: clusterClient}
}
func (c *clusterManagerClient) waitForDeployments(ctx context.Context, deploymentsByNamespace map[string][]string, cluster *types.Cluster, timeout string) error {
for namespace, deployments := range deploymentsByNamespace {
for _, deployment := range deployments {
err := c.WaitForDeployment(ctx, cluster, timeout, "Available", deployment, namespace)
if err != nil {
return fmt.Errorf("waiting for %s in namespace %s: %v", deployment, namespace, err)
}
}
}
return nil
}
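// A minimal sketch of the expected input shape for waitForDeployments, using illustrative
// names rather than the real component lists that callers pass in:
//
//	deployments := map[string][]string{
//		"example-namespace": {"example-controller-manager"},
//	}
//	// err := c.waitForDeployments(ctx, deployments, cluster, "30m")
//
// Every listed deployment is waited on for the "Available" condition within the given timeout.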
| 42 |
eks-anywhere | aws | Go | package clustermanager
import (
"bytes"
"context"
_ "embed"
"errors"
"fmt"
"io"
"math"
"reflect"
"regexp"
"strings"
"time"
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/go-logr/logr"
"k8s.io/utils/integer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clustermanager/internal"
"github.com/aws/eks-anywhere/pkg/clustermarshaller"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/diagnostics"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/templater"
"github.com/aws/eks-anywhere/pkg/types"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)
const (
maxRetries = 30
defaultBackOffPeriod = 5 * time.Second
machineBackoff = 1 * time.Second
defaultMachinesMinWait = 30 * time.Minute
	// DefaultMaxWaitPerMachine is the default max time the cluster manager will wait per machine.
DefaultMaxWaitPerMachine = 10 * time.Minute
// DefaultClusterWait is the default max time the cluster manager will wait for the capi cluster to be in ready state.
DefaultClusterWait = 60 * time.Minute
// DefaultControlPlaneWait is the default time the cluster manager will wait for the control plane to be ready.
DefaultControlPlaneWait = 60 * time.Minute
// DefaultControlPlaneWaitAfterMove is the default max time the cluster manager will wait for the control plane to be in ready state after the capi move operation.
DefaultControlPlaneWaitAfterMove = 15 * time.Minute
// DefaultDeploymentWait is the default max time the cluster manager will wait for the deployment to be available.
DefaultDeploymentWait = 30 * time.Minute
controlPlaneInProgressStr = "1m"
etcdInProgressStr = "1m"
	// DefaultEtcdWait is the default time the cluster manager will wait for etcd to be ready.
DefaultEtcdWait = 60 * time.Minute
// DefaultUnhealthyMachineTimeout is the default timeout for an unhealthy machine health check.
DefaultUnhealthyMachineTimeout = 5 * time.Minute
	// DefaultNodeStartupTimeout is the default timeout for a machine without a node to be considered to have failed the machine health check.
DefaultNodeStartupTimeout = 10 * time.Minute
// DefaultClusterctlMoveTimeout is arbitrarily established. Equal to kubectl wait default timeouts.
DefaultClusterctlMoveTimeout = 30 * time.Minute
)
var (
clusterctlNetworkErrorRegex = regexp.MustCompile(`.*failed to connect to the management cluster:.*`)
eksaClusterResourceType = fmt.Sprintf("clusters.%s", v1alpha1.GroupVersion.Group)
)
type ClusterManager struct {
eksaComponents EKSAComponents
clusterClient *RetrierClient
retrier *retrier.Retrier
writer filewriter.FileWriter
networking Networking
diagnosticsFactory diagnostics.DiagnosticBundleFactory
awsIamAuth AwsIamAuth
machineMaxWait time.Duration
machineBackoff time.Duration
machinesMinWait time.Duration
controlPlaneWaitTimeout time.Duration
controlPlaneWaitAfterMoveTimeout time.Duration
externalEtcdWaitTimeout time.Duration
unhealthyMachineTimeout time.Duration
nodeStartupTimeout time.Duration
clusterWaitTimeout time.Duration
deploymentWaitTimeout time.Duration
clusterctlMoveTimeout time.Duration
}
type ClusterClient interface {
KubernetesClient
BackupManagement(ctx context.Context, cluster *types.Cluster, managementStatePath string) error
MoveManagement(ctx context.Context, from, target *types.Cluster, clusterName string) error
WaitForClusterReady(ctx context.Context, cluster *types.Cluster, timeout string, clusterName string) error
WaitForControlPlaneAvailable(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error
WaitForControlPlaneReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error
WaitForControlPlaneNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error
WaitForManagedExternalEtcdReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error
WaitForManagedExternalEtcdNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error
GetWorkloadKubeconfig(ctx context.Context, clusterName string, cluster *types.Cluster) ([]byte, error)
GetEksaGitOpsConfig(ctx context.Context, gitOpsConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.GitOpsConfig, error)
GetEksaFluxConfig(ctx context.Context, fluxConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.FluxConfig, error)
GetEksaOIDCConfig(ctx context.Context, oidcConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.OIDCConfig, error)
GetEksaAWSIamConfig(ctx context.Context, awsIamConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.AWSIamConfig, error)
DeleteCluster(ctx context.Context, managementCluster, clusterToDelete *types.Cluster) error
DeleteGitOpsConfig(ctx context.Context, managementCluster *types.Cluster, gitOpsName, namespace string) error
DeleteOIDCConfig(ctx context.Context, managementCluster *types.Cluster, oidcConfigName, oidcConfigNamespace string) error
DeleteAWSIamConfig(ctx context.Context, managementCluster *types.Cluster, awsIamConfigName, awsIamConfigNamespace string) error
DeleteEKSACluster(ctx context.Context, managementCluster *types.Cluster, eksaClusterName, eksaClusterNamespace string) error
DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error
InitInfrastructure(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error
WaitForDeployment(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error
SaveLog(ctx context.Context, cluster *types.Cluster, deployment *types.Deployment, fileName string, writer filewriter.FileWriter) error
GetMachines(ctx context.Context, cluster *types.Cluster, clusterName string) ([]types.Machine, error)
GetClusters(ctx context.Context, cluster *types.Cluster) ([]types.CAPICluster, error)
PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error
ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
GetEksaVSphereDatacenterConfig(ctx context.Context, VSphereDatacenterName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error)
UpdateEnvironmentVariablesInNamespace(ctx context.Context, resourceType, resourceName string, envMap map[string]string, cluster *types.Cluster, namespace string) error
GetEksaVSphereMachineConfig(ctx context.Context, VSphereDatacenterName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, error)
GetEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackMachineConfig, error)
SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error
CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error
ValidateControlPlaneNodes(ctx context.Context, cluster *types.Cluster, clusterName string) error
ValidateWorkerNodes(ctx context.Context, clusterName string, kubeconfigFile string) error
CountMachineDeploymentReplicasReady(ctx context.Context, clusterName string, kubeconfigFile string) (int, int, error)
GetBundles(ctx context.Context, kubeconfigFile, name, namespace string) (*releasev1alpha1.Bundles, error)
GetApiServerUrl(ctx context.Context, cluster *types.Cluster) (string, error)
KubeconfigSecretAvailable(ctx context.Context, kubeconfig string, clusterName string, namespace string) (bool, error)
DeleteOldWorkerNodeGroup(ctx context.Context, machineDeployment *clusterv1.MachineDeployment, kubeconfig string) error
GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error)
GetMachineDeploymentsForCluster(ctx context.Context, clusterName string, opts ...executables.KubectlOpt) ([]clusterv1.MachineDeployment, error)
GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
GetEksdRelease(ctx context.Context, name, namespace, kubeconfigFile string) (*eksdv1alpha1.Release, error)
ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error
}
type Networking interface {
Install(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, namespaces []string) error
Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec, namespaces []string) (*types.ChangeDiff, error)
RunPostControlPlaneUpgradeSetup(ctx context.Context, cluster *types.Cluster) error
}
type AwsIamAuth interface {
CreateAndInstallAWSIAMAuthCASecret(ctx context.Context, managementCluster *types.Cluster, workloadClusterName string) error
InstallAWSIAMAuth(ctx context.Context, management, workload *types.Cluster, spec *cluster.Spec) error
UpgradeAWSIAMAuth(ctx context.Context, cluster *types.Cluster, spec *cluster.Spec) error
}
// EKSAComponents allows managing the eks-a components installation in a cluster.
type EKSAComponents interface {
Install(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error
Upgrade(ctx context.Context, log logr.Logger, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error)
}
type ClusterManagerOpt func(*ClusterManager)
// DefaultRetrier builds a retrier with the default configuration.
func DefaultRetrier() *retrier.Retrier {
return retrier.NewWithMaxRetries(maxRetries, defaultBackOffPeriod)
}
// New constructs a new ClusterManager.
func New(clusterClient *RetrierClient, networking Networking, writer filewriter.FileWriter, diagnosticBundleFactory diagnostics.DiagnosticBundleFactory, awsIamAuth AwsIamAuth, eksaComponents EKSAComponents, opts ...ClusterManagerOpt) *ClusterManager {
c := &ClusterManager{
eksaComponents: eksaComponents,
clusterClient: clusterClient,
writer: writer,
networking: networking,
retrier: DefaultRetrier(),
diagnosticsFactory: diagnosticBundleFactory,
machineMaxWait: DefaultMaxWaitPerMachine,
machineBackoff: machineBackoff,
machinesMinWait: defaultMachinesMinWait,
awsIamAuth: awsIamAuth,
controlPlaneWaitTimeout: DefaultControlPlaneWait,
controlPlaneWaitAfterMoveTimeout: DefaultControlPlaneWaitAfterMove,
externalEtcdWaitTimeout: DefaultEtcdWait,
unhealthyMachineTimeout: DefaultUnhealthyMachineTimeout,
nodeStartupTimeout: DefaultNodeStartupTimeout,
clusterWaitTimeout: DefaultClusterWait,
deploymentWaitTimeout: DefaultDeploymentWait,
clusterctlMoveTimeout: DefaultClusterctlMoveTimeout,
}
for _, o := range opts {
o(c)
}
return c
}
func WithControlPlaneWaitTimeout(timeout time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.controlPlaneWaitTimeout = timeout
}
}
func WithExternalEtcdWaitTimeout(timeout time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.externalEtcdWaitTimeout = timeout
}
}
func WithMachineBackoff(machineBackoff time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.machineBackoff = machineBackoff
}
}
func WithMachineMaxWait(machineMaxWait time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.machineMaxWait = machineMaxWait
}
}
func WithMachineMinWait(machineMinWait time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.machinesMinWait = machineMinWait
}
}
// WithUnhealthyMachineTimeout sets the timeout of an unhealthy machine health check.
func WithUnhealthyMachineTimeout(timeout time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.unhealthyMachineTimeout = timeout
}
}
// WithNodeStartupTimeout sets the timeout for a machine without a node to be considered to have failed the machine health check.
func WithNodeStartupTimeout(timeout time.Duration) ClusterManagerOpt {
return func(c *ClusterManager) {
c.nodeStartupTimeout = timeout
}
}
func WithRetrier(retrier *retrier.Retrier) ClusterManagerOpt {
return func(c *ClusterManager) {
c.clusterClient.retrier = retrier
c.retrier = retrier
}
}
// WithNoTimeouts disables the timeout for all the waits and retries in cluster manager.
func WithNoTimeouts() ClusterManagerOpt {
return func(c *ClusterManager) {
noTimeoutRetrier := retrier.NewWithNoTimeout()
maxTime := time.Duration(math.MaxInt64)
c.retrier = noTimeoutRetrier
c.machinesMinWait = maxTime
c.controlPlaneWaitTimeout = maxTime
c.controlPlaneWaitAfterMoveTimeout = maxTime
c.externalEtcdWaitTimeout = maxTime
c.unhealthyMachineTimeout = maxTime
c.nodeStartupTimeout = maxTime
c.clusterWaitTimeout = maxTime
c.deploymentWaitTimeout = maxTime
c.clusterctlMoveTimeout = maxTime
}
}
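// For illustration, a hedged sketch of how these functional options are typically combined
// when constructing a ClusterManager; the client, networking, writer, diagnostics, awsIamAuth
// and eksaComponents values are assumed to be built elsewhere:
//
//	cm := New(
//		retrierClient,
//		networking,
//		writer,
//		diagnosticsFactory,
//		awsIamAuth,
//		eksaComponents,
//		WithControlPlaneWaitTimeout(30*time.Minute),
//		WithUnhealthyMachineTimeout(10*time.Minute),
//	)
//
// Options are applied in order after the defaults are set, so a later option overrides an
// earlier one that touches the same field.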
func clusterctlMoveRetryPolicy(totalRetries int, err error) (retry bool, wait time.Duration) {
// Exponential backoff on network errors. Retrier built-in backoff is linear, so implementing here.
// Retrier first calls the policy before retry #1. We want it zero-based for exponentiation.
if totalRetries < 1 {
totalRetries = 1
}
const networkFaultBaseRetryTime = 10 * time.Second
const backoffFactor = 1.5
waitTime := time.Duration(float64(networkFaultBaseRetryTime) * math.Pow(backoffFactor, float64(totalRetries-1)))
if match := clusterctlNetworkErrorRegex.MatchString(err.Error()); match {
return true, waitTime
}
return false, 0
}
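// A worked example of the policy above, assuming every error matches clusterctlNetworkErrorRegex:
// with the 10s base and 1.5 factor the waits are 10s before retry #1, 15s before retry #2,
// 22.5s before retry #3, and so on, until the retrier's overall timeout is exhausted.
// Errors that do not match the regex are not retried at all.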
// BackupCAPI takes a backup of the management cluster's resources during the upgrade process.
func (c *ClusterManager) BackupCAPI(ctx context.Context, cluster *types.Cluster, managementStatePath string) error {
	// Network errors, most commonly connection refused or timeout, can occur if the source
	// cluster becomes inaccessible during the move operation. If this occurs without retries, clusterctl
	// abandons the move operation and the cluster upgrade fails.
	// Retrying once connectivity is re-established completes the partial move.
	// Here we use a retrier, with the above defined clusterctlMoveRetryPolicy, to attempt to
	// wait out the network disruption and complete the move.
	// clusterctlMoveTimeout is kept the same as for MoveManagement since both use the same command with different params.
r := retrier.New(c.clusterctlMoveTimeout, retrier.WithRetryPolicy(clusterctlMoveRetryPolicy))
err := r.Retry(func() error {
return c.clusterClient.BackupManagement(ctx, cluster, managementStatePath)
})
if err != nil {
return fmt.Errorf("backing up CAPI resources of management cluster before moving to bootstrap cluster: %v", err)
}
return nil
}
func (c *ClusterManager) MoveCAPI(ctx context.Context, from, to *types.Cluster, clusterName string, clusterSpec *cluster.Spec, checkers ...types.NodeReadyChecker) error {
logger.V(3).Info("Waiting for management machines to be ready before move")
labels := []string{clusterv1.MachineControlPlaneNameLabel, clusterv1.MachineDeploymentNameLabel}
if err := c.waitForNodesReady(ctx, from, clusterName, labels, checkers...); err != nil {
return err
}
logger.V(3).Info("Waiting for management cluster to be ready before move")
if err := c.clusterClient.WaitForClusterReady(ctx, from, c.clusterWaitTimeout.String(), clusterName); err != nil {
return err
}
// Network errors, most commonly connection refused or timeout, can occur if either source or target
// cluster becomes inaccessible during the move operation. If this occurs without retries, clusterctl
// abandons the move operation, leaving an unpredictable subset of the CAPI components copied to target
// or deleted from source. Retrying once connectivity is re-established completes the partial move.
// Here we use a retrier, with the above defined clusterctlMoveRetryPolicy policy, to attempt to
// wait out the network disruption and complete the move.
r := retrier.New(c.clusterctlMoveTimeout, retrier.WithRetryPolicy(clusterctlMoveRetryPolicy))
err := r.Retry(func() error {
return c.clusterClient.MoveManagement(ctx, from, to, clusterName)
})
if err != nil {
return fmt.Errorf("moving CAPI management from source to target: %v", err)
}
logger.V(3).Info("Waiting for workload cluster control plane to be ready after move")
err = c.clusterClient.WaitForControlPlaneReady(ctx, to, c.controlPlaneWaitAfterMoveTimeout.String(), clusterName)
if err != nil {
return err
}
logger.V(3).Info("Waiting for workload cluster control plane replicas to be ready after move")
err = c.waitForControlPlaneReplicasReady(ctx, to, clusterSpec)
if err != nil {
return fmt.Errorf("waiting for workload cluster control plane replicas to be ready: %v", err)
}
logger.V(3).Info("Waiting for workload cluster machine deployment replicas to be ready after move")
err = c.waitForMachineDeploymentReplicasReady(ctx, to, clusterSpec)
if err != nil {
return fmt.Errorf("waiting for workload cluster machinedeployment replicas to be ready: %v", err)
}
logger.V(3).Info("Waiting for machines to be ready after move")
if err = c.waitForNodesReady(ctx, to, clusterName, labels, checkers...); err != nil {
return err
}
return nil
}
func (c *ClusterManager) writeCAPISpecFile(clusterName string, content []byte) error {
fileName := fmt.Sprintf("%s-eks-a-cluster.yaml", clusterName)
if _, err := c.writer.Write(fileName, content); err != nil {
return fmt.Errorf("writing capi spec file: %v", err)
}
return nil
}
// CreateWorkloadCluster creates a workload cluster in the provider that the customer has specified.
// It applies the kubernetes manifest file to the management cluster, waits for the control plane to be ready,
// and then generates the kubeconfig for the cluster.
// It returns a struct of type Cluster containing the name and the kubeconfig of the cluster.
func (c *ClusterManager) CreateWorkloadCluster(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) (*types.Cluster, error) {
clusterName := clusterSpec.Cluster.Name
workloadCluster := &types.Cluster{
Name: clusterName,
ExistingManagement: managementCluster.ExistingManagement,
}
if err := c.applyProviderManifests(ctx, clusterSpec, managementCluster, provider); err != nil {
return nil, err
}
if err := c.waitUntilControlPlaneAvailable(ctx, clusterSpec, managementCluster); err != nil {
return nil, err
}
logger.V(3).Info("Waiting for workload kubeconfig generation", "cluster", clusterName)
// Use a buffer to cache the kubeconfig.
var buf bytes.Buffer
if err := c.getWorkloadClusterKubeconfig(ctx, clusterName, managementCluster, &buf); err != nil {
return nil, fmt.Errorf("waiting for workload kubeconfig: %v", err)
}
rawKubeconfig := buf.Bytes()
// The Docker provider wants to update the kubeconfig to patch the server address before
// we write it to disk. This is to ensure we can communicate with the cluster even when
// hosted inside a Docker Desktop VM.
if err := provider.UpdateKubeConfig(&rawKubeconfig, clusterName); err != nil {
return nil, err
}
kubeconfigFile, err := c.writer.Write(
kubeconfig.FormatWorkloadClusterKubeconfigFilename(clusterName),
rawKubeconfig,
filewriter.PersistentFile,
filewriter.Permission0600,
)
if err != nil {
return nil, fmt.Errorf("writing workload kubeconfig: %v", err)
}
workloadCluster.KubeconfigFile = kubeconfigFile
return workloadCluster, nil
}
func (c *ClusterManager) waitUntilControlPlaneAvailable(
ctx context.Context,
clusterSpec *cluster.Spec,
managementCluster *types.Cluster,
) error {
// If we have external etcd we need to wait for that first as control plane nodes can't
// come up without it.
if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
logger.V(3).Info("Waiting for external etcd to be ready", "cluster", clusterSpec.Cluster.Name)
err := c.clusterClient.WaitForManagedExternalEtcdReady(
ctx,
managementCluster,
c.externalEtcdWaitTimeout.String(),
clusterSpec.Cluster.Name,
)
if err != nil {
return fmt.Errorf("waiting for external etcd for workload cluster to be ready: %v", err)
}
logger.V(3).Info("External etcd is ready")
}
logger.V(3).Info("Waiting for control plane to be available")
err := c.clusterClient.WaitForControlPlaneAvailable(
ctx,
managementCluster,
c.controlPlaneWaitTimeout.String(),
clusterSpec.Cluster.Name,
)
if err != nil {
return fmt.Errorf("waiting for control plane to be ready: %v", err)
}
return nil
}
func (c *ClusterManager) applyProviderManifests(
ctx context.Context,
spec *cluster.Spec,
management *types.Cluster,
provider providers.Provider,
) error {
cpContent, mdContent, err := provider.GenerateCAPISpecForCreate(ctx, management, spec)
if err != nil {
return fmt.Errorf("generating capi spec: %v", err)
}
content := templater.AppendYamlResources(cpContent, mdContent)
if err = c.writeCAPISpecFile(spec.Cluster.Name, content); err != nil {
return err
}
err = c.clusterClient.ApplyKubeSpecFromBytesWithNamespace(ctx, management, content, constants.EksaSystemNamespace)
if err != nil {
return fmt.Errorf("applying capi spec: %v", err)
}
return nil
}
func (c *ClusterManager) getWorkloadClusterKubeconfig(ctx context.Context, clusterName string, managementCluster *types.Cluster, w io.Writer) error {
kubeconfig, err := c.clusterClient.GetWorkloadKubeconfig(ctx, clusterName, managementCluster)
if err != nil {
return fmt.Errorf("getting workload kubeconfig: %v", err)
}
if _, err := io.Copy(w, bytes.NewReader(kubeconfig)); err != nil {
return err
}
return nil
}
func (c *ClusterManager) RunPostCreateWorkloadCluster(ctx context.Context, managementCluster, workloadCluster *types.Cluster, clusterSpec *cluster.Spec) error {
logger.V(3).Info("Waiting for controlplane and worker machines to be ready")
labels := []string{clusterv1.MachineControlPlaneNameLabel, clusterv1.MachineDeploymentNameLabel}
return c.waitForNodesReady(ctx, managementCluster, workloadCluster.Name, labels, types.WithNodeRef())
}
func (c *ClusterManager) DeleteCluster(ctx context.Context, managementCluster, clusterToDelete *types.Cluster, provider providers.Provider, clusterSpec *cluster.Spec) error {
if clusterSpec.Cluster.IsManaged() {
if err := c.deleteEKSAObjects(ctx, managementCluster, clusterToDelete, provider, clusterSpec); err != nil {
return err
}
}
logger.V(1).Info("Deleting CAPI cluster", "name", clusterToDelete.Name)
if err := c.clusterClient.DeleteCluster(ctx, managementCluster, clusterToDelete); err != nil {
return err
}
return provider.PostClusterDeleteValidate(ctx, managementCluster)
}
func (c *ClusterManager) deleteEKSAObjects(ctx context.Context, managementCluster, clusterToDelete *types.Cluster, provider providers.Provider, clusterSpec *cluster.Spec) error {
log := logger.Get()
log.V(1).Info("Deleting EKS-A objects", "cluster", clusterSpec.Cluster.Name)
log.V(2).Info("Pausing EKS-A reconciliation", "cluster", clusterSpec.Cluster.Name)
if err := c.PauseEKSAControllerReconcile(ctx, clusterToDelete, clusterSpec, provider); err != nil {
return err
}
log.V(2).Info("Deleting EKS-A Cluster", "name", clusterSpec.Cluster.Name)
if err := c.clusterClient.DeleteEKSACluster(ctx, managementCluster, clusterSpec.Cluster.Name, clusterSpec.Cluster.Namespace); err != nil {
return err
}
if clusterSpec.GitOpsConfig != nil {
log.V(2).Info("Deleting GitOpsConfig", "name", clusterSpec.GitOpsConfig.Name)
if err := c.clusterClient.DeleteGitOpsConfig(ctx, managementCluster, clusterSpec.GitOpsConfig.Name, clusterSpec.GitOpsConfig.Namespace); err != nil {
return err
}
}
if clusterSpec.OIDCConfig != nil {
log.V(2).Info("Deleting OIDCConfig", "name", clusterSpec.OIDCConfig.Name)
if err := c.clusterClient.DeleteOIDCConfig(ctx, managementCluster, clusterSpec.OIDCConfig.Name, clusterSpec.OIDCConfig.Namespace); err != nil {
return err
}
}
if clusterSpec.AWSIamConfig != nil {
log.V(2).Info("Deleting AWSIamConfig", "name", clusterSpec.AWSIamConfig.Name)
if err := c.clusterClient.DeleteAWSIamConfig(ctx, managementCluster, clusterSpec.AWSIamConfig.Name, clusterSpec.AWSIamConfig.Namespace); err != nil {
return err
}
}
log.V(2).Info("Cleaning up provider specific resources")
if err := provider.DeleteResources(ctx, clusterSpec); err != nil {
return err
}
return nil
}
func (c *ClusterManager) UpgradeCluster(ctx context.Context, managementCluster, workloadCluster *types.Cluster, newClusterSpec *cluster.Spec, provider providers.Provider) error {
eksaMgmtCluster := workloadCluster
if managementCluster != nil && managementCluster.ExistingManagement {
eksaMgmtCluster = managementCluster
}
currentSpec, err := c.GetCurrentClusterSpec(ctx, eksaMgmtCluster, newClusterSpec.Cluster.Name)
if err != nil {
return fmt.Errorf("getting current cluster spec: %v", err)
}
cpContent, mdContent, err := provider.GenerateCAPISpecForUpgrade(ctx, managementCluster, eksaMgmtCluster, currentSpec, newClusterSpec)
if err != nil {
return fmt.Errorf("generating capi spec: %v", err)
}
if err = c.writeCAPISpecFile(newClusterSpec.Cluster.Name, templater.AppendYamlResources(cpContent, mdContent)); err != nil {
return err
}
err = c.clusterClient.ApplyKubeSpecFromBytesWithNamespace(ctx, managementCluster, cpContent, constants.EksaSystemNamespace)
if err != nil {
return fmt.Errorf("applying capi control plane spec: %v", err)
}
var externalEtcdTopology bool
if newClusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
logger.V(3).Info("Waiting for external etcd upgrade to be in progress")
err = c.clusterClient.WaitForManagedExternalEtcdNotReady(ctx, managementCluster, etcdInProgressStr, newClusterSpec.Cluster.Name)
if err != nil {
if !strings.Contains(fmt.Sprint(err), "timed out waiting for the condition on clusters") {
return fmt.Errorf("error waiting for external etcd upgrade not ready: %v", err)
} else {
logger.V(3).Info("Timed out while waiting for external etcd to be in progress, likely caused by no external etcd upgrade")
}
}
logger.V(3).Info("Waiting for external etcd to be ready after upgrade")
if err = c.clusterClient.WaitForManagedExternalEtcdReady(ctx, managementCluster, c.externalEtcdWaitTimeout.String(), newClusterSpec.Cluster.Name); err != nil {
if err := c.clusterClient.RemoveAnnotationInNamespace(ctx, "etcdadmcluster", fmt.Sprintf("%s-etcd", newClusterSpec.Cluster.Name),
etcdv1.UpgradeInProgressAnnotation,
managementCluster,
constants.EksaSystemNamespace); err != nil {
return fmt.Errorf("removing annotation: %v", err)
}
return fmt.Errorf("waiting for external etcd for workload cluster to be ready: %v", err)
}
externalEtcdTopology = true
logger.V(3).Info("External etcd is ready")
}
logger.V(3).Info("Waiting for control plane upgrade to be in progress")
err = c.clusterClient.WaitForControlPlaneNotReady(ctx, managementCluster, controlPlaneInProgressStr, newClusterSpec.Cluster.Name)
if err != nil {
if !strings.Contains(fmt.Sprint(err), "timed out waiting for the condition on clusters") {
return fmt.Errorf("error waiting for control plane not ready: %v", err)
} else {
logger.V(3).Info("Timed out while waiting for control plane to be in progress, likely caused by no control plane upgrade")
}
}
logger.V(3).Info("Run post control plane upgrade operations")
err = provider.RunPostControlPlaneUpgrade(ctx, currentSpec, newClusterSpec, workloadCluster, managementCluster)
if err != nil {
return fmt.Errorf("running post control plane upgrade operations: %v", err)
}
logger.V(3).Info("Waiting for control plane to be ready")
err = c.clusterClient.WaitForControlPlaneReady(ctx, managementCluster, c.controlPlaneWaitTimeout.String(), newClusterSpec.Cluster.Name)
if err != nil {
return fmt.Errorf("waiting for workload cluster control plane to be ready: %v", err)
}
logger.V(3).Info("Waiting for control plane machines to be ready")
if err = c.waitForNodesReady(ctx, managementCluster, newClusterSpec.Cluster.Name, []string{clusterv1.MachineControlPlaneNameLabel}, types.WithNodeRef(), types.WithNodeHealthy()); err != nil {
return err
}
logger.V(3).Info("Waiting for control plane to be ready after upgrade")
err = c.clusterClient.WaitForControlPlaneReady(ctx, managementCluster, c.controlPlaneWaitTimeout.String(), newClusterSpec.Cluster.Name)
if err != nil {
return fmt.Errorf("waiting for workload cluster control plane to be ready: %v", err)
}
logger.V(3).Info("Running CNI post control plane upgrade operations")
if err = c.networking.RunPostControlPlaneUpgradeSetup(ctx, workloadCluster); err != nil {
return fmt.Errorf("running CNI post control plane upgrade operations: %v", err)
}
logger.V(3).Info("Waiting for workload cluster control plane replicas to be ready after upgrade")
err = c.waitForControlPlaneReplicasReady(ctx, managementCluster, newClusterSpec)
if err != nil {
return fmt.Errorf("waiting for workload cluster control plane replicas to be ready: %v", err)
}
err = c.clusterClient.ApplyKubeSpecFromBytesWithNamespace(ctx, managementCluster, mdContent, constants.EksaSystemNamespace)
if err != nil {
return fmt.Errorf("applying capi machine deployment spec: %v", err)
}
if err = c.removeOldWorkerNodeGroups(ctx, managementCluster, provider, currentSpec, newClusterSpec); err != nil {
return fmt.Errorf("removing old worker node groups: %v", err)
}
logger.V(3).Info("Waiting for workload cluster machine deployment replicas to be ready after upgrade")
err = c.waitForMachineDeploymentReplicasReady(ctx, managementCluster, newClusterSpec)
if err != nil {
return fmt.Errorf("waiting for workload cluster machinedeployment replicas to be ready: %v", err)
}
logger.V(3).Info("Waiting for machine deployment machines to be ready")
if err = c.waitForNodesReady(ctx, managementCluster, newClusterSpec.Cluster.Name, []string{clusterv1.MachineDeploymentNameLabel}, types.WithNodeRef(), types.WithNodeHealthy()); err != nil {
return err
}
logger.V(3).Info("Waiting for workload cluster capi components to be ready after upgrade")
err = c.waitForCAPI(ctx, eksaMgmtCluster, provider, externalEtcdTopology)
if err != nil {
return fmt.Errorf("waiting for workload cluster capi components to be ready: %v", err)
}
if newClusterSpec.AWSIamConfig != nil {
logger.V(3).Info("Run aws-iam-authenticator upgrade operations")
if err = c.awsIamAuth.UpgradeAWSIAMAuth(ctx, workloadCluster, newClusterSpec); err != nil {
return fmt.Errorf("running aws-iam-authenticator upgrade operations: %v", err)
}
}
return nil
}
func (c *ClusterManager) EKSAClusterSpecChanged(ctx context.Context, cluster *types.Cluster, newClusterSpec *cluster.Spec) (bool, error) {
cc, err := c.clusterClient.GetEksaCluster(ctx, cluster, newClusterSpec.Cluster.Name)
if err != nil {
return false, err
}
if !cc.Equal(newClusterSpec.Cluster) {
logger.V(3).Info("Existing cluster and new cluster spec differ")
return true, nil
}
currentClusterSpec, err := c.buildSpecForCluster(ctx, cluster, cc)
if err != nil {
return false, err
}
if currentClusterSpec.VersionsBundle.EksD.Name != newClusterSpec.VersionsBundle.EksD.Name {
logger.V(3).Info("New eks-d release detected")
return true, nil
}
if newClusterSpec.OIDCConfig != nil && currentClusterSpec.OIDCConfig != nil {
		if !newClusterSpec.OIDCConfig.Spec.Equal(&currentClusterSpec.OIDCConfig.Spec) {
logger.V(3).Info("OIDC config changes detected")
return true, nil
}
}
if newClusterSpec.AWSIamConfig != nil && currentClusterSpec.AWSIamConfig != nil {
if !reflect.DeepEqual(newClusterSpec.AWSIamConfig.Spec.MapRoles, currentClusterSpec.AWSIamConfig.Spec.MapRoles) ||
!reflect.DeepEqual(newClusterSpec.AWSIamConfig.Spec.MapUsers, currentClusterSpec.AWSIamConfig.Spec.MapUsers) {
logger.V(3).Info("AWSIamConfig changes detected")
return true, nil
}
}
logger.V(3).Info("Clusters are the same")
return false, nil
}
func (c *ClusterManager) InstallCAPI(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error {
err := c.clusterClient.InitInfrastructure(ctx, clusterSpec, cluster, provider)
if err != nil {
return fmt.Errorf("initializing capi resources in cluster: %v", err)
}
return c.waitForCAPI(ctx, cluster, provider, clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil)
}
func (c *ClusterManager) waitForCAPI(ctx context.Context, cluster *types.Cluster, provider providers.Provider, externalEtcdTopology bool) error {
err := c.clusterClient.waitForDeployments(ctx, internal.CAPIDeployments, cluster, c.deploymentWaitTimeout.String())
if err != nil {
return err
}
if externalEtcdTopology {
err := c.clusterClient.waitForDeployments(ctx, internal.ExternalEtcdDeployments, cluster, c.deploymentWaitTimeout.String())
if err != nil {
return err
}
}
err = c.clusterClient.waitForDeployments(ctx, provider.GetDeployments(), cluster, c.deploymentWaitTimeout.String())
if err != nil {
return err
}
return nil
}
func (c *ClusterManager) InstallNetworking(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
return c.networking.Install(ctx, cluster, clusterSpec, getProviderNamespaces(provider.GetDeployments()))
}
func (c *ClusterManager) UpgradeNetworking(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec, provider providers.Provider) (*types.ChangeDiff, error) {
providerNamespaces := getProviderNamespaces(provider.GetDeployments())
return c.networking.Upgrade(ctx, cluster, currentSpec, newSpec, providerNamespaces)
}
func getProviderNamespaces(providerDeployments map[string][]string) []string {
namespaces := make([]string, 0, len(providerDeployments))
for namespace := range providerDeployments {
namespaces = append(namespaces, namespace)
}
return namespaces
}
func (c *ClusterManager) InstallMachineHealthChecks(ctx context.Context, clusterSpec *cluster.Spec, workloadCluster *types.Cluster) error {
mhc, err := templater.ObjectsToYaml(clusterapi.MachineHealthCheckObjects(clusterSpec, c.unhealthyMachineTimeout, c.nodeStartupTimeout)...)
if err != nil {
return err
}
err = c.clusterClient.ApplyKubeSpecFromBytes(ctx, workloadCluster, mhc)
if err != nil {
return fmt.Errorf("applying machine health checks: %v", err)
}
return nil
}
// InstallAwsIamAuth applies the aws-iam-authenticator manifest based on cluster spec inputs.
// Generates a kubeconfig for interacting with the cluster with aws-iam-authenticator client.
func (c *ClusterManager) InstallAwsIamAuth(ctx context.Context, management, workload *types.Cluster, spec *cluster.Spec) error {
return c.awsIamAuth.InstallAWSIAMAuth(ctx, management, workload, spec)
}
func (c *ClusterManager) CreateAwsIamAuthCaSecret(ctx context.Context, managementCluster *types.Cluster, workloadClusterName string) error {
return c.awsIamAuth.CreateAndInstallAWSIAMAuthCASecret(ctx, managementCluster, workloadClusterName)
}
func (c *ClusterManager) SaveLogsManagementCluster(ctx context.Context, spec *cluster.Spec, cluster *types.Cluster) error {
if cluster == nil {
return nil
}
if cluster.KubeconfigFile == "" {
return nil
}
bundle, err := c.diagnosticsFactory.DiagnosticBundleManagementCluster(spec, cluster.KubeconfigFile)
if err != nil {
logger.V(5).Info("Error generating support bundle for management cluster", "error", err)
return nil
}
return collectDiagnosticBundle(ctx, bundle)
}
func (c *ClusterManager) SaveLogsWorkloadCluster(ctx context.Context, provider providers.Provider, spec *cluster.Spec, cluster *types.Cluster) error {
if cluster == nil {
return nil
}
if cluster.KubeconfigFile == "" {
return nil
}
bundle, err := c.diagnosticsFactory.DiagnosticBundleWorkloadCluster(spec, provider, cluster.KubeconfigFile)
if err != nil {
logger.V(5).Info("Error generating support bundle for workload cluster", "error", err)
return nil
}
return collectDiagnosticBundle(ctx, bundle)
}
func collectDiagnosticBundle(ctx context.Context, bundle diagnostics.DiagnosticBundle) error {
var sinceTimeValue *time.Time
threeHours := "3h"
sinceTimeValue, err := diagnostics.ParseTimeFromDuration(threeHours)
if err != nil {
logger.V(5).Info("Error parsing time options for support bundle generation", "error", err)
return nil
}
err = bundle.CollectAndAnalyze(ctx, sinceTimeValue)
if err != nil {
logger.V(5).Info("Error collecting and saving logs", "error", err)
}
return nil
}
func (c *ClusterManager) waitForControlPlaneReplicasReady(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec) error {
isCpReady := func() error {
return c.clusterClient.ValidateControlPlaneNodes(ctx, managementCluster, clusterSpec.Cluster.Name)
}
err := isCpReady()
if err == nil {
return nil
}
timeout := c.totalTimeoutForMachinesReadyWait(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count)
r := retrier.New(timeout)
if err := r.Retry(isCpReady); err != nil {
return fmt.Errorf("retries exhausted waiting for controlplane replicas to be ready: %v", err)
}
return nil
}
func (c *ClusterManager) waitForMachineDeploymentReplicasReady(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec) error {
ready, total := 0, 0
policy := func(_ int, _ error) (bool, time.Duration) {
return true, c.machineBackoff * time.Duration(integer.IntMax(1, total-ready))
}
var machineDeploymentReplicasCount int
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
machineDeploymentReplicasCount += *workerNodeGroupConfiguration.Count
}
areMdReplicasReady := func() error {
var err error
ready, total, err = c.clusterClient.CountMachineDeploymentReplicasReady(ctx, clusterSpec.Cluster.Name, managementCluster.KubeconfigFile)
if err != nil {
return err
}
if ready != total {
return fmt.Errorf("%d machine deployment replicas are not ready", total-ready)
}
return nil
}
timeout := c.totalTimeoutForMachinesReadyWait(machineDeploymentReplicasCount)
r := retrier.New(timeout, retrier.WithRetryPolicy(policy))
if err := r.Retry(areMdReplicasReady); err != nil {
return fmt.Errorf("retries exhausted waiting for machinedeployment replicas to be ready: %v", err)
}
return nil
}
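// A rough example of the polling cadence above: with the default 1s machineBackoff and 10
// worker replicas none of which are ready yet, the next check happens after ~10s; as replicas
// become ready the interval shrinks, never dropping below one backoff period. The overall
// budget is still bounded by totalTimeoutForMachinesReadyWait for the configured replica count.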
// totalTimeoutForMachinesReadyWait calculates the total timeout when waiting for machines to be ready.
// The timeout increases linearly with the number of machines but can never be less than the configured
// minimum.
func (c *ClusterManager) totalTimeoutForMachinesReadyWait(replicaCount int) time.Duration {
timeout := time.Duration(replicaCount) * c.machineMaxWait
if timeout <= c.machinesMinWait {
timeout = c.machinesMinWait
}
return timeout
}
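// For example, with the default 10 minute machineMaxWait and 30 minute machinesMinWait, a
// 2-replica wait gets 30m (the minimum wins over 2*10m = 20m) while a 5-replica wait gets
// 5*10m = 50m. WithNoTimeouts effectively removes the cap by raising machinesMinWait to the
// maximum duration.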
func (c *ClusterManager) waitForNodesReady(ctx context.Context, managementCluster *types.Cluster, clusterName string, labels []string, checkers ...types.NodeReadyChecker) error {
totalNodes, err := c.getNodesCount(ctx, managementCluster, clusterName, labels)
if err != nil {
return fmt.Errorf("getting the total count of nodes: %v", err)
}
readyNodes := 0
policy := func(_ int, _ error) (bool, time.Duration) {
return true, c.machineBackoff * time.Duration(integer.IntMax(1, totalNodes-readyNodes))
}
areNodesReady := func() error {
var err error
readyNodes, err = c.countNodesReady(ctx, managementCluster, clusterName, labels, checkers...)
if err != nil {
return err
}
if readyNodes != totalNodes {
logger.V(4).Info("Nodes are not ready yet", "total", totalNodes, "ready", readyNodes, "cluster name", clusterName)
return errors.New("nodes are not ready yet")
}
logger.V(4).Info("Nodes ready", "total", totalNodes)
return nil
}
err = areNodesReady()
if err == nil {
return nil
}
timeout := c.totalTimeoutForMachinesReadyWait(totalNodes)
r := retrier.New(timeout, retrier.WithRetryPolicy(policy))
if err := r.Retry(areNodesReady); err != nil {
return fmt.Errorf("retries exhausted waiting for machines to be ready: %v", err)
}
return nil
}
func (c *ClusterManager) getNodesCount(ctx context.Context, managementCluster *types.Cluster, clusterName string, labels []string) (int, error) {
totalNodes := 0
labelsMap := make(map[string]interface{}, len(labels))
for _, label := range labels {
labelsMap[label] = nil
}
if _, ok := labelsMap[clusterv1.MachineControlPlaneNameLabel]; ok {
kcp, err := c.clusterClient.GetKubeadmControlPlane(ctx, managementCluster, clusterName, executables.WithCluster(managementCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return 0, fmt.Errorf("getting KubeadmControlPlane for cluster %s: %v", clusterName, err)
}
totalNodes += int(*kcp.Spec.Replicas)
}
if _, ok := labelsMap[clusterv1.MachineDeploymentNameLabel]; ok {
mds, err := c.clusterClient.GetMachineDeploymentsForCluster(ctx, clusterName, executables.WithCluster(managementCluster), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return 0, fmt.Errorf("getting KubeadmControlPlane for cluster %s: %v", clusterName, err)
}
for _, md := range mds {
totalNodes += int(*md.Spec.Replicas)
}
}
return totalNodes, nil
}
func (c *ClusterManager) countNodesReady(ctx context.Context, managementCluster *types.Cluster, clusterName string, labels []string, checkers ...types.NodeReadyChecker) (ready int, err error) {
machines, err := c.clusterClient.GetMachines(ctx, managementCluster, clusterName)
if err != nil {
return 0, fmt.Errorf("getting machines resources from management cluster: %v", err)
}
for _, m := range machines {
// Extracted from cluster-api: NodeRef is considered a better signal than InfrastructureReady,
// because it ensures the node in the workload cluster is up and running.
if !m.HasAnyLabel(labels) {
continue
}
passed := true
for _, checker := range checkers {
if !checker(m.Status) {
passed = false
break
}
}
if passed {
ready += 1
}
}
return ready, nil
}
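// As used elsewhere in this file, callers typically pass checkers such as types.WithNodeRef()
// and types.WithNodeHealthy(); a machine only counts as ready when it carries one of the
// requested labels and satisfies every checker. A sketch (the cluster name and management
// cluster are placeholders):
//
//	ready, err := c.countNodesReady(ctx, mgmt, "my-cluster",
//		[]string{clusterv1.MachineControlPlaneNameLabel},
//		types.WithNodeRef(), types.WithNodeHealthy())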
func (c *ClusterManager) waitForAllControlPlanes(ctx context.Context, cluster *types.Cluster, waitForCluster time.Duration) error {
clusters, err := c.clusterClient.GetClusters(ctx, cluster)
if err != nil {
return fmt.Errorf("getting clusters: %v", err)
}
for _, clu := range clusters {
err = c.clusterClient.WaitForControlPlaneReady(ctx, cluster, waitForCluster.String(), clu.Metadata.Name)
if err != nil {
return fmt.Errorf("waiting for workload cluster control plane for cluster %s to be ready: %v", clu.Metadata.Name, err)
}
}
return nil
}
func (c *ClusterManager) waitForAllClustersReady(ctx context.Context, cluster *types.Cluster, waitStr string) error {
clusters, err := c.clusterClient.GetClusters(ctx, cluster)
if err != nil {
return fmt.Errorf("getting clusters: %v", err)
}
for _, clu := range clusters {
err = c.clusterClient.WaitForClusterReady(ctx, cluster, waitStr, clu.Metadata.Name)
if err != nil {
return fmt.Errorf("waiting for cluster %s to be ready: %v", clu.Metadata.Name, err)
}
}
return nil
}
func machineDeploymentsToDelete(currentSpec, newSpec *cluster.Spec) []string {
nodeGroupsToDelete := cluster.NodeGroupsToDelete(currentSpec, newSpec)
machineDeployments := make([]string, 0, len(nodeGroupsToDelete))
for _, group := range nodeGroupsToDelete {
mdName := clusterapi.MachineDeploymentName(newSpec.Cluster, group)
machineDeployments = append(machineDeployments, mdName)
}
return machineDeployments
}
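// For illustration: if the current spec has worker node groups "md-0" and "md-1" and the new
// spec keeps only "md-0", the returned slice contains the MachineDeployment name that
// clusterapi.MachineDeploymentName derives for "md-1" (the exact naming scheme is owned by
// that helper and not assumed here).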
func (c *ClusterManager) removeOldWorkerNodeGroups(ctx context.Context, workloadCluster *types.Cluster, provider providers.Provider, currentSpec, newSpec *cluster.Spec) error {
machineDeployments := machineDeploymentsToDelete(currentSpec, newSpec)
for _, machineDeploymentName := range machineDeployments {
machineDeployment, err := c.clusterClient.GetMachineDeployment(ctx, machineDeploymentName, executables.WithKubeconfig(workloadCluster.KubeconfigFile), executables.WithNamespace(constants.EksaSystemNamespace))
if err != nil {
return fmt.Errorf("getting machine deployment to remove: %v", err)
}
if err := c.clusterClient.DeleteOldWorkerNodeGroup(ctx, machineDeployment, workloadCluster.KubeconfigFile); err != nil {
return fmt.Errorf("removing old worker nodes from cluster: %v", err)
}
}
return nil
}
func (c *ClusterManager) InstallCustomComponents(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error {
if err := c.eksaComponents.Install(ctx, logger.Get(), cluster, clusterSpec); err != nil {
return err
}
// TODO(g-gaston): should this be moved inside the components installer?
return provider.InstallCustomProviderComponents(ctx, cluster.KubeconfigFile)
}
// Upgrade updates the eksa components in a cluster according to a Spec.
func (c *ClusterManager) Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error) {
return c.eksaComponents.Upgrade(ctx, logger.Get(), cluster, currentSpec, newSpec)
}
func (c *ClusterManager) CreateEKSANamespace(ctx context.Context, cluster *types.Cluster) error {
return c.clusterClient.CreateNamespaceIfNotPresent(ctx, cluster.KubeconfigFile, constants.EksaSystemNamespace)
}
// CreateEKSAResources applies the eks-a cluster specs (cluster, datacenterconfig, machine configs, etc.), as well as the
// release bundle to the cluster. Before applying the spec, we pause the eksa controller's cluster and datacenter webhook validation
// so that the cluster spec can be created or updated in the cluster without webhook validation errors.
func (c *ClusterManager) CreateEKSAResources(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec,
datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig,
) error {
if clusterSpec.Cluster.Namespace != "" {
if err := c.clusterClient.CreateNamespaceIfNotPresent(ctx, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace); err != nil {
return err
}
}
clusterSpec.Cluster.PauseReconcile()
datacenterConfig.PauseReconcile()
resourcesSpec, err := clustermarshaller.MarshalClusterSpec(clusterSpec, datacenterConfig, machineConfigs)
if err != nil {
return err
}
logger.V(4).Info("Applying eksa yaml resources to cluster")
logger.V(6).Info(string(resourcesSpec))
if err = c.applyResource(ctx, cluster, resourcesSpec); err != nil {
return err
}
return c.ApplyBundles(ctx, clusterSpec, cluster)
}
func (c *ClusterManager) ApplyBundles(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error {
bundleObj, err := yaml.Marshal(clusterSpec.Bundles)
if err != nil {
return fmt.Errorf("outputting bundle yaml: %v", err)
}
logger.V(1).Info("Applying Bundles to cluster")
err = c.clusterClient.ApplyKubeSpecFromBytes(ctx, cluster, bundleObj)
if err != nil {
return fmt.Errorf("applying bundle spec: %v", err)
}
return nil
}
// PauseCAPIWorkloadClusters pauses all workload CAPI clusters except the management cluster.
func (c *ClusterManager) PauseCAPIWorkloadClusters(ctx context.Context, managementCluster *types.Cluster) error {
clusters, err := c.clusterClient.GetClusters(ctx, managementCluster)
if err != nil {
return err
}
for _, w := range clusters {
// skip pausing management cluster
if w.Metadata.Name == managementCluster.Name {
continue
}
if err = c.clusterClient.PauseCAPICluster(ctx, w.Metadata.Name, managementCluster.KubeconfigFile); err != nil {
return err
}
}
return nil
}
// ResumeCAPIWorkloadClusters resumes all workload CAPI clusters except the management cluster.
func (c *ClusterManager) ResumeCAPIWorkloadClusters(ctx context.Context, managementCluster *types.Cluster) error {
clusters, err := c.clusterClient.GetClusters(ctx, managementCluster)
if err != nil {
return err
}
for _, w := range clusters {
// skip resuming management cluster
if w.Metadata.Name == managementCluster.Name {
continue
}
if err = c.clusterClient.ResumeCAPICluster(ctx, w.Metadata.Name, managementCluster.KubeconfigFile); err != nil {
return err
}
}
return nil
}
func (c *ClusterManager) PauseEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
if clusterSpec.Cluster.IsSelfManaged() {
return c.pauseEksaReconcileForManagementAndWorkloadClusters(ctx, cluster, clusterSpec, provider)
}
return c.pauseReconcileForCluster(ctx, cluster, clusterSpec.Cluster, provider)
}
func (c *ClusterManager) pauseEksaReconcileForManagementAndWorkloadClusters(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
clusters := &v1alpha1.ClusterList{}
err := c.clusterClient.ListObjects(ctx, eksaClusterResourceType, clusterSpec.Cluster.Namespace, managementCluster.KubeconfigFile, clusters)
if err != nil {
return err
}
for _, w := range clusters.Items {
if w.ManagedBy() != clusterSpec.Cluster.Name {
continue
}
if err := c.pauseReconcileForCluster(ctx, managementCluster, &w, provider); err != nil {
return err
}
}
return nil
}
func (c *ClusterManager) pauseReconcileForCluster(ctx context.Context, clusterCreds *types.Cluster, cluster *v1alpha1.Cluster, provider providers.Provider) error {
pausedAnnotation := map[string]string{cluster.PausedAnnotation(): "true"}
err := c.clusterClient.UpdateAnnotationInNamespace(ctx, provider.DatacenterResourceType(), cluster.Spec.DatacenterRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace)
if err != nil {
return fmt.Errorf("updating annotation when pausing datacenterconfig reconciliation: %v", err)
}
if provider.MachineResourceType() != "" {
for _, machineConfigRef := range cluster.MachineConfigRefs() {
err = c.clusterClient.UpdateAnnotationInNamespace(ctx, provider.MachineResourceType(), machineConfigRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace)
if err != nil {
return fmt.Errorf("updating annotation when pausing reconciliation for machine config %s: %v", machineConfigRef.Name, err)
}
}
}
err = c.clusterClient.UpdateAnnotationInNamespace(ctx, cluster.ResourceType(), cluster.Name, pausedAnnotation, clusterCreds, cluster.Namespace)
if err != nil {
return fmt.Errorf("updating paused annotation in cluster reconciliation: %v", err)
}
if err = c.clusterClient.UpdateAnnotationInNamespace(ctx,
cluster.ResourceType(),
cluster.Name,
map[string]string{v1alpha1.ManagedByCLIAnnotation: "true"},
clusterCreds,
cluster.Namespace,
); err != nil {
return fmt.Errorf("updating managed by cli annotation in cluster when pausing cluster reconciliation: %v", err)
}
return nil
}
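// The net effect, as exercised by the unit tests in this package, is a paused annotation of
// the form "anywhere.eks.amazonaws.com/paused": "true" on the datacenter config, every
// referenced machine config and the cluster itself, plus v1alpha1.ManagedByCLIAnnotation set
// to "true" on the cluster; resumeReconcileForCluster below removes the same keys.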
func (c *ClusterManager) resumeEksaReconcileForManagementAndWorkloadClusters(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
clusters := &v1alpha1.ClusterList{}
err := c.clusterClient.ListObjects(ctx, eksaClusterResourceType, clusterSpec.Cluster.Namespace, managementCluster.KubeconfigFile, clusters)
if err != nil {
return err
}
for _, w := range clusters.Items {
if w.ManagedBy() != clusterSpec.Cluster.Name {
continue
}
if err := c.resumeReconcileForCluster(ctx, managementCluster, &w, provider); err != nil {
return err
}
}
return nil
}
func (c *ClusterManager) ResumeEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error {
// clear pause annotation
clusterSpec.Cluster.ClearPauseAnnotation()
provider.DatacenterConfig(clusterSpec).ClearPauseAnnotation()
if clusterSpec.Cluster.IsSelfManaged() {
return c.resumeEksaReconcileForManagementAndWorkloadClusters(ctx, cluster, clusterSpec, provider)
}
return c.resumeReconcileForCluster(ctx, cluster, clusterSpec.Cluster, provider)
}
func (c *ClusterManager) resumeReconcileForCluster(ctx context.Context, clusterCreds *types.Cluster, cluster *v1alpha1.Cluster, provider providers.Provider) error {
pausedAnnotation := cluster.PausedAnnotation()
err := c.clusterClient.RemoveAnnotationInNamespace(ctx, provider.DatacenterResourceType(), cluster.Spec.DatacenterRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace)
if err != nil {
return fmt.Errorf("removing paused annotation when resuming datacenterconfig reconciliation: %v", err)
}
if provider.MachineResourceType() != "" {
for _, machineConfigRef := range cluster.MachineConfigRefs() {
err = c.clusterClient.RemoveAnnotationInNamespace(ctx, provider.MachineResourceType(), machineConfigRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace)
if err != nil {
return fmt.Errorf("removing paused annotation when resuming reconciliation for machine config %s: %v", machineConfigRef.Name, err)
}
}
}
err = c.clusterClient.RemoveAnnotationInNamespace(ctx, cluster.ResourceType(), cluster.Name, pausedAnnotation, clusterCreds, cluster.Namespace)
if err != nil {
return fmt.Errorf("removing paused annotation when resuming cluster reconciliation: %v", err)
}
if err = c.clusterClient.RemoveAnnotationInNamespace(ctx,
cluster.ResourceType(),
cluster.Name,
v1alpha1.ManagedByCLIAnnotation,
clusterCreds,
cluster.Namespace,
); err != nil {
return fmt.Errorf("removing managed by CLI annotation when resuming cluster reconciliation: %v", err)
}
return nil
}
func (c *ClusterManager) applyResource(ctx context.Context, cluster *types.Cluster, resourcesSpec []byte) error {
err := c.clusterClient.ApplyKubeSpecFromBytesForce(ctx, cluster, resourcesSpec)
if err != nil {
return fmt.Errorf("applying eks-a spec: %v", err)
}
return nil
}
func (c *ClusterManager) GetCurrentClusterSpec(ctx context.Context, clus *types.Cluster, clusterName string) (*cluster.Spec, error) {
eksaCluster, err := c.clusterClient.GetEksaCluster(ctx, clus, clusterName)
if err != nil {
return nil, fmt.Errorf("failed getting EKS-A cluster to build current cluster Spec: %v", err)
}
return c.buildSpecForCluster(ctx, clus, eksaCluster)
}
func (c *ClusterManager) buildSpecForCluster(ctx context.Context, clus *types.Cluster, eksaCluster *v1alpha1.Cluster) (*cluster.Spec, error) {
return cluster.BuildSpecForCluster(ctx, eksaCluster, c.bundlesFetcher(clus), c.eksdReleaseFetcher(clus), c.gitOpsFetcher(clus), c.fluxConfigFetcher(clus), c.oidcFetcher(clus), c.awsIamConfigFetcher(clus))
}
func (c *ClusterManager) bundlesFetcher(cluster *types.Cluster) cluster.BundlesFetch {
return func(ctx context.Context, name, namespace string) (*releasev1alpha1.Bundles, error) {
return c.clusterClient.GetBundles(ctx, cluster.KubeconfigFile, name, namespace)
}
}
func (c *ClusterManager) eksdReleaseFetcher(cluster *types.Cluster) cluster.EksdReleaseFetch {
return func(ctx context.Context, name, namespace string) (*eksdv1alpha1.Release, error) {
return c.clusterClient.GetEksdRelease(ctx, name, namespace, cluster.KubeconfigFile)
}
}
func (c *ClusterManager) gitOpsFetcher(cluster *types.Cluster) cluster.GitOpsFetch {
return func(ctx context.Context, name, namespace string) (*v1alpha1.GitOpsConfig, error) {
return c.clusterClient.GetEksaGitOpsConfig(ctx, name, cluster.KubeconfigFile, namespace)
}
}
func (c *ClusterManager) fluxConfigFetcher(cluster *types.Cluster) cluster.FluxConfigFetch {
return func(ctx context.Context, name, namespace string) (*v1alpha1.FluxConfig, error) {
return c.clusterClient.GetEksaFluxConfig(ctx, name, cluster.KubeconfigFile, namespace)
}
}
func (c *ClusterManager) oidcFetcher(cluster *types.Cluster) cluster.OIDCFetch {
return func(ctx context.Context, name, namespace string) (*v1alpha1.OIDCConfig, error) {
return c.clusterClient.GetEksaOIDCConfig(ctx, name, cluster.KubeconfigFile, namespace)
}
}
func (c *ClusterManager) awsIamConfigFetcher(cluster *types.Cluster) cluster.AWSIamConfigFetch {
return func(ctx context.Context, name, namespace string) (*v1alpha1.AWSIamConfig, error) {
return c.clusterClient.GetEksaAWSIamConfig(ctx, name, cluster.KubeconfigFile, namespace)
}
}
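// DeletePackageResources deletes the package resources associated with clusterName
// through the management cluster.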
func (c *ClusterManager) DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error {
return c.clusterClient.DeletePackageResources(ctx, managementCluster, clusterName)
}
| 1,348 |
eks-anywhere | aws | Go | package clustermanager_test
import (
"context"
"errors"
"fmt"
"math"
"testing"
"time"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermanager"
"github.com/aws/eks-anywhere/pkg/clustermanager/internal"
mocksmanager "github.com/aws/eks-anywhere/pkg/clustermanager/mocks"
"github.com/aws/eks-anywhere/pkg/constants"
mocksdiagnostics "github.com/aws/eks-anywhere/pkg/diagnostics/interfaces/mocks"
"github.com/aws/eks-anywhere/pkg/executables"
"github.com/aws/eks-anywhere/pkg/features"
mockswriter "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
mocksprovider "github.com/aws/eks-anywhere/pkg/providers/mocks"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
)
var (
eksaClusterResourceType = fmt.Sprintf("clusters.%s", v1alpha1.GroupVersion.Group)
eksaVSphereDatacenterResourceType = fmt.Sprintf("vspheredatacenterconfigs.%s", v1alpha1.GroupVersion.Group)
eksaVSphereMachineResourceType = fmt.Sprintf("vspheremachineconfigs.%s", v1alpha1.GroupVersion.Group)
expectedPauseAnnotation = map[string]string{"anywhere.eks.amazonaws.com/paused": "true"}
maxTime = time.Duration(math.MaxInt64)
managementStatePath = fmt.Sprintf("cluster-state-backup-%s", time.Now().Format("2006-01-02T15_04_05"))
)
func TestClusterManagerInstallNetworkingSuccess(t *testing.T) {
ctx := context.Background()
cluster := &types.Cluster{}
clusterSpec := test.NewClusterSpec()
c, m := newClusterManager(t)
m.provider.EXPECT().GetDeployments()
m.networking.EXPECT().Install(ctx, cluster, clusterSpec, []string{})
if err := c.InstallNetworking(ctx, cluster, clusterSpec, m.provider); err != nil {
t.Errorf("ClusterManager.InstallNetworking() error = %v, wantErr nil", err)
}
}
func TestClusterManagerInstallNetworkingNetworkingError(t *testing.T) {
ctx := context.Background()
cluster := &types.Cluster{}
clusterSpec := test.NewClusterSpec()
c, m := newClusterManager(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
m.provider.EXPECT().GetDeployments()
m.networking.EXPECT().Install(ctx, cluster, clusterSpec, []string{}).Return(errors.New("error in networking"))
if err := c.InstallNetworking(ctx, cluster, clusterSpec, m.provider); err == nil {
t.Errorf("ClusterManager.InstallNetworking() error = nil, wantErr not nil")
}
}
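// getKcpAndMdsForNodeCount returns a KubeadmControlPlane and a one-element MachineDeployment
// list, each with the given replica count, for stubbing CAPI objects in tests.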
func getKcpAndMdsForNodeCount(count int32) (*controlplanev1.KubeadmControlPlane, []clusterv1.MachineDeployment) {
kcp := &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
Replicas: ptr.Int32(count),
},
}
md := []clusterv1.MachineDeployment{
{
Spec: clusterv1.MachineDeploymentSpec{
Replicas: ptr.Int32(count),
},
},
}
return kcp, md
}
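// InstallCAPI initializes the CAPI infrastructure and waits for every core CAPI deployment,
// plus any provider-specific deployments, to become Available.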
func TestClusterManagerCAPIWaitForDeploymentStackedEtcd(t *testing.T) {
ctx := context.Background()
clusterObj := &types.Cluster{}
c, m := newClusterManager(t)
clusterSpecStackedEtcd := test.NewClusterSpec()
m.client.EXPECT().InitInfrastructure(ctx, clusterSpecStackedEtcd, clusterObj, m.provider)
for namespace, deployments := range internal.CAPIDeployments {
for _, deployment := range deployments {
m.client.EXPECT().WaitForDeployment(ctx, clusterObj, "30m0s", "Available", deployment, namespace)
}
}
providerDeployments := map[string][]string{}
m.provider.EXPECT().GetDeployments().Return(providerDeployments)
for namespace, deployments := range providerDeployments {
for _, deployment := range deployments {
m.client.EXPECT().WaitForDeployment(ctx, clusterObj, "30m0s", "Available", deployment, namespace)
}
}
if err := c.InstallCAPI(ctx, clusterSpecStackedEtcd, clusterObj, m.provider); err != nil {
t.Errorf("ClusterManager.InstallCAPI() error = %v, wantErr nil", err)
}
}
func TestClusterManagerCAPIWaitForDeploymentExternalEtcd(t *testing.T) {
ctx := context.Background()
clusterObj := &types.Cluster{}
c, m := newClusterManager(t)
clusterSpecExternalEtcd := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 1}
})
m.client.EXPECT().InitInfrastructure(ctx, clusterSpecExternalEtcd, clusterObj, m.provider)
for namespace, deployments := range internal.CAPIDeployments {
for _, deployment := range deployments {
m.client.EXPECT().WaitForDeployment(ctx, clusterObj, "30m0s", "Available", deployment, namespace)
}
}
for namespace, deployments := range internal.ExternalEtcdDeployments {
for _, deployment := range deployments {
m.client.EXPECT().WaitForDeployment(ctx, clusterObj, "30m0s", "Available", deployment, namespace)
}
}
providerDeployments := map[string][]string{}
m.provider.EXPECT().GetDeployments().Return(providerDeployments)
for namespace, deployments := range providerDeployments {
for _, deployment := range deployments {
m.client.EXPECT().WaitForDeployment(ctx, clusterObj, "30m0s", "Available", deployment, namespace)
}
}
if err := c.InstallCAPI(ctx, clusterSpecExternalEtcd, clusterObj, m.provider); err != nil {
t.Errorf("ClusterManager.InstallCAPI() error = %v, wantErr nil", err)
}
}
func TestClusterManagerSaveLogsSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
bootstrapCluster := &types.Cluster{
Name: "bootstrap",
KubeconfigFile: "bootstrap.kubeconfig",
}
workloadCluster := &types.Cluster{
Name: "workload",
KubeconfigFile: "workload.kubeconfig",
}
c, m := newClusterManager(t)
b := m.diagnosticsBundle
m.diagnosticsFactory.EXPECT().DiagnosticBundleManagementCluster(clusterSpec, bootstrapCluster.KubeconfigFile).Return(b, nil)
b.EXPECT().CollectAndAnalyze(ctx, gomock.AssignableToTypeOf(&time.Time{}))
m.diagnosticsFactory.EXPECT().DiagnosticBundleWorkloadCluster(clusterSpec, m.provider, workloadCluster.KubeconfigFile).Return(b, nil)
b.EXPECT().CollectAndAnalyze(ctx, gomock.AssignableToTypeOf(&time.Time{}))
if err := c.SaveLogsManagementCluster(ctx, clusterSpec, bootstrapCluster); err != nil {
t.Errorf("ClusterManager.SaveLogsManagementCluster() error = %v, wantErr nil", err)
}
if err := c.SaveLogsWorkloadCluster(ctx, m.provider, clusterSpec, workloadCluster); err != nil {
t.Errorf("ClusterManager.SaveLogsWorkloadCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerPauseCAPIWorkloadClusters(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
capiClusterName := "capi-cluster"
clusters := []types.CAPICluster{{Metadata: types.Metadata{Name: capiClusterName}}}
c, m := newClusterManager(t)
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(clusters, nil)
m.client.EXPECT().PauseCAPICluster(ctx, capiClusterName, mgmtCluster.KubeconfigFile).Return(nil)
if err := c.PauseCAPIWorkloadClusters(ctx, mgmtCluster); err != nil {
t.Errorf("ClusterManager.PauseCAPIWorkloadClusters() error = %v", err)
}
}
func TestClusterManagerPauseCAPIWorkloadClustersErrorGetClusters(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
c, m := newClusterManager(t)
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(nil, errors.New("Error: failed to get clusters"))
if err := c.PauseCAPIWorkloadClusters(ctx, mgmtCluster); err == nil {
t.Error("ClusterManager.PauseCAPIWorkloadClusters() error = nil, wantErr not nil")
}
}
func TestClusterManagerPauseCAPIWorkloadClustersErrorPause(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
capiClusterName := "capi-cluster"
clusters := []types.CAPICluster{{Metadata: types.Metadata{Name: capiClusterName}}}
c, m := newClusterManager(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(clusters, nil)
m.client.EXPECT().PauseCAPICluster(ctx, capiClusterName, mgmtCluster.KubeconfigFile).Return(errors.New("Error pausing cluster"))
if err := c.PauseCAPIWorkloadClusters(ctx, mgmtCluster); err == nil {
t.Error("ClusterManager.PauseCAPIWorkloadClusters() error = nil, wantErr not nil")
}
}
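// The CAPI cluster backing the management cluster itself must be skipped when pausing
// workload clusters.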
func TestClusterManagerPauseCAPIWorkloadClustersSkipManagement(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
clusters := []types.CAPICluster{{Metadata: types.Metadata{Name: mgmtClusterName}}}
c, m := newClusterManager(t)
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(clusters, nil)
if err := c.PauseCAPIWorkloadClusters(ctx, mgmtCluster); err != nil {
t.Errorf("ClusterManager.PauseCAPIWorkloadClusters() error = %v", err)
}
}
func TestClusterManagerResumeCAPIWorkloadClustersErrorGetClusters(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
c, m := newClusterManager(t)
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(nil, errors.New("Error: failed to get clusters"))
if err := c.ResumeCAPIWorkloadClusters(ctx, mgmtCluster); err == nil {
t.Error("ClusterManager.ResumeCAPIWorkloadClusters() error = nil, wantErr not nil")
}
}
func TestClusterManagerResumeCAPIWorkloadClustersErrorResume(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
capiClusterName := "capi-cluster"
clusters := []types.CAPICluster{{Metadata: types.Metadata{Name: capiClusterName}}}
c, m := newClusterManager(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(clusters, nil)
	m.client.EXPECT().ResumeCAPICluster(ctx, capiClusterName, mgmtCluster.KubeconfigFile).Return(errors.New("Error resuming cluster"))
if err := c.ResumeCAPIWorkloadClusters(ctx, mgmtCluster); err == nil {
t.Error("ClusterManager.ResumeCAPIWorkloadClusters() error = nil, wantErr not nil")
}
}
func TestClusterManagerResumeCAPIWorkloadClusters(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
capiClusterName := "capi-cluster"
clusters := []types.CAPICluster{{Metadata: types.Metadata{Name: capiClusterName}}}
c, m := newClusterManager(t)
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(clusters, nil)
m.client.EXPECT().ResumeCAPICluster(ctx, capiClusterName, mgmtCluster.KubeconfigFile).Return(nil)
if err := c.ResumeCAPIWorkloadClusters(ctx, mgmtCluster); err != nil {
t.Errorf("ClusterManager.ResumeCAPIWorkloadClusters() error = %v", err)
}
}
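// The CAPI cluster backing the management cluster itself is also skipped when resuming
// workload clusters.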
func TestClusterManagerResumeCAPIWorkloadClustersSkipManagement(t *testing.T) {
ctx := context.Background()
mgmtClusterName := "cluster-name"
mgmtCluster := &types.Cluster{
Name: mgmtClusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
clusters := []types.CAPICluster{{Metadata: types.Metadata{Name: mgmtClusterName}}}
c, m := newClusterManager(t)
m.client.EXPECT().GetClusters(ctx, mgmtCluster).Return(clusters, nil)
if err := c.ResumeCAPIWorkloadClusters(ctx, mgmtCluster); err != nil {
t.Errorf("ClusterManager.ResumeCAPIWorkloadClusters() error = %v", err)
}
}
func TestClusterManagerCreateWorkloadClusterSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
c, m := newClusterManager(t)
m.provider.EXPECT().GenerateCAPISpecForCreate(ctx, mgmtCluster, clusterSpec)
m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, mgmtCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
m.client.EXPECT().WaitForControlPlaneAvailable(ctx, mgmtCluster, "1h0m0s", clusterName)
kubeconfig := []byte("content")
m.client.EXPECT().GetWorkloadKubeconfig(ctx, clusterName, mgmtCluster).Return(kubeconfig, nil)
m.provider.EXPECT().UpdateKubeConfig(&kubeconfig, clusterName)
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.kubeconfig", gomock.Any(), gomock.Not(gomock.Nil()))
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
if _, err := c.CreateWorkloadCluster(ctx, mgmtCluster, clusterSpec, m.provider); err != nil {
t.Errorf("ClusterManager.CreateWorkloadCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerCreateWorkloadClusterErrorGetKubeconfig(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster.Name = tt.clusterName
gomock.InOrder(
tt.mocks.provider.EXPECT().GenerateCAPISpecForCreate(tt.ctx, tt.cluster, tt.clusterSpec),
tt.mocks.writer.EXPECT().Write(tt.clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil())),
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, tt.cluster, test.OfType("[]uint8"), constants.EksaSystemNamespace),
tt.mocks.client.EXPECT().WaitForControlPlaneAvailable(tt.ctx, tt.cluster, "1h0m0s", tt.clusterName),
tt.mocks.client.EXPECT().GetWorkloadKubeconfig(tt.ctx, tt.clusterName, tt.cluster).Return(nil, errors.New("get kubeconfig error")),
)
_, err := tt.clusterManager.CreateWorkloadCluster(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)
tt.Expect(err).To(MatchError(ContainSubstring("get kubeconfig error")))
}
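// WithControlPlaneWaitTimeout overrides the default control plane wait used while creating
// the workload cluster.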
func TestClusterManagerCreateWorkloadClusterTimeoutOverrideSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
c, m := newClusterManager(t, clustermanager.WithControlPlaneWaitTimeout(20*time.Minute))
m.provider.EXPECT().GenerateCAPISpecForCreate(ctx, mgmtCluster, clusterSpec)
m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, mgmtCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
m.client.EXPECT().WaitForControlPlaneAvailable(ctx, mgmtCluster, "20m0s", clusterName)
kubeconfig := []byte("content")
m.client.EXPECT().GetWorkloadKubeconfig(ctx, clusterName, mgmtCluster).Return(kubeconfig, nil)
m.provider.EXPECT().UpdateKubeConfig(&kubeconfig, clusterName)
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.kubeconfig", gomock.Any(), gomock.Not(gomock.Nil()))
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
if _, err := c.CreateWorkloadCluster(ctx, mgmtCluster, clusterSpec, m.provider); err != nil {
t.Errorf("ClusterManager.CreateWorkloadCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerRunPostCreateWorkloadClusterSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
workloadCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "workload-kubeconfig",
}
kcp, mds := getKcpAndMdsForNodeCount(0)
c, m := newClusterManager(t)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
mgmtCluster,
mgmtCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mgmtCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
mgmtCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mgmtCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).AnyTimes().Return([]types.Machine{}, nil)
if err := c.RunPostCreateWorkloadCluster(ctx, mgmtCluster, workloadCluster, clusterSpec); err != nil {
t.Errorf("ClusterManager.RunPostCreateWorkloadCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerCreateWorkloadClusterWithExternalEtcdSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3}
s.Cluster.Spec.ControlPlaneConfiguration.Count = 2
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
c, m := newClusterManager(t)
m.provider.EXPECT().GenerateCAPISpecForCreate(ctx, mgmtCluster, clusterSpec)
m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, mgmtCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
m.client.EXPECT().WaitForManagedExternalEtcdReady(ctx, mgmtCluster, "1h0m0s", clusterName)
m.client.EXPECT().WaitForControlPlaneAvailable(ctx, mgmtCluster, "1h0m0s", clusterName)
kubeconfig := []byte("content")
m.client.EXPECT().GetWorkloadKubeconfig(ctx, clusterName, mgmtCluster).Return(kubeconfig, nil)
m.provider.EXPECT().UpdateKubeConfig(&kubeconfig, clusterName)
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.kubeconfig", gomock.Any(), gomock.Not(gomock.Nil()))
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
if _, err := c.CreateWorkloadCluster(ctx, mgmtCluster, clusterSpec, m.provider); err != nil {
t.Errorf("ClusterManager.CreateWorkloadCluster() error = %v, wantErr nil", err)
}
}
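// WithExternalEtcdWaitTimeout changes only the external etcd wait; the control plane wait
// keeps its 1h default.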
func TestClusterManagerCreateWorkloadClusterWithExternalEtcdTimeoutOverrideSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{Count: 3}
s.Cluster.Spec.ControlPlaneConfiguration.Count = 2
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
c, m := newClusterManager(t, clustermanager.WithExternalEtcdWaitTimeout(30*time.Minute))
m.provider.EXPECT().GenerateCAPISpecForCreate(ctx, mgmtCluster, clusterSpec)
m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, mgmtCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
m.client.EXPECT().WaitForManagedExternalEtcdReady(ctx, mgmtCluster, "30m0s", clusterName)
m.client.EXPECT().WaitForControlPlaneAvailable(ctx, mgmtCluster, "1h0m0s", clusterName)
kubeconfig := []byte("content")
m.client.EXPECT().GetWorkloadKubeconfig(ctx, clusterName, mgmtCluster).Return(kubeconfig, nil)
m.provider.EXPECT().UpdateKubeConfig(&kubeconfig, clusterName)
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.kubeconfig", gomock.Any(), gomock.Not(gomock.Nil()))
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
if _, err := c.CreateWorkloadCluster(ctx, mgmtCluster, clusterSpec, m.provider); err != nil {
t.Errorf("ClusterManager.CreateWorkloadCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerRunPostCreateWorkloadClusterWaitForMachinesTimeout(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
workloadCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "workload-kubeconfig",
}
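	// Tiny machine backoff and wait settings make the machine-readiness wait time out almost immediately.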
c, m := newClusterManager(t, clustermanager.WithMachineBackoff(1*time.Nanosecond), clustermanager.WithMachineMaxWait(50*time.Microsecond), clustermanager.WithMachineMinWait(100*time.Microsecond))
kcp, mds := getKcpAndMdsForNodeCount(1)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
mgmtCluster,
mgmtCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mgmtCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
mgmtCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mgmtCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
// Fail once
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).Times(1).Return(nil, errors.New("error get machines"))
// Return a machine with no nodeRef the rest of the retries
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).MinTimes(1).Return([]types.Machine{{Metadata: types.MachineMetadata{
Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""},
}}}, nil)
if err := c.RunPostCreateWorkloadCluster(ctx, mgmtCluster, workloadCluster, clusterSpec); err == nil {
t.Error("ClusterManager.RunPostCreateWorkloadCluster() error = nil, wantErr not nil", err)
}
}
func TestClusterManagerRunPostCreateWorkloadClusterWaitForMachinesSuccessAfterRetries(t *testing.T) {
retries := 10
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}
workloadCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "workload-kubeconfig",
}
c, m := newClusterManager(t, clustermanager.WithMachineBackoff(1*time.Nanosecond), clustermanager.WithMachineMaxWait(1*time.Minute), clustermanager.WithMachineMinWait(2*time.Minute))
kcp, mds := getKcpAndMdsForNodeCount(1)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
mgmtCluster,
mgmtCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mgmtCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
mgmtCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mgmtCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
	// Fail the first retries-5 attempts
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).Times(retries-5).Return(nil, errors.New("error get machines"))
	// Return a machine with no nodeRef for the next 3 retries
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).Times(3).Return([]types.Machine{{Metadata: types.MachineMetadata{
Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""},
}}}, nil)
	// Return one machine without a nodeRef and another with a nodeRef and a healthy node condition
status := types.MachineStatus{
NodeRef: &types.ResourceRef{},
Conditions: types.Conditions{
{
Type: "NodeHealthy",
Status: "True",
},
},
}
machines := []types.Machine{
{Metadata: types.MachineMetadata{Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""}}},
{Metadata: types.MachineMetadata{Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""}}, Status: status},
}
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).Times(1).Return(machines, nil)
// Finally return two machines with node ref
machines = []types.Machine{
{Metadata: types.MachineMetadata{Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""}}, Status: status},
{Metadata: types.MachineMetadata{Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""}}, Status: status},
}
m.client.EXPECT().GetMachines(ctx, mgmtCluster, mgmtCluster.Name).Times(1).Return(machines, nil)
if err := c.RunPostCreateWorkloadCluster(ctx, mgmtCluster, workloadCluster, clusterSpec); err != nil {
t.Errorf("ClusterManager.RunPostCreateWorkloadCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeSelfManagedClusterSuccess(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
kcp, mds := getKcpAndMdsForNodeCount(0)
tt := newSpecChangedTest(t)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", clusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, wCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, wCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, wCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, tt.cluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeSelfManagedClusterWithUnstackedEtcdSuccess(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
tt := newSpecChangedTest(t)
tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
tt.oldClusterConfig.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdReady(tt.ctx, mCluster, "1h0m0s", clusterName)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", clusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, wCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(8)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, wCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, wCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, tt.cluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeSelfManagedClusterWithUnstackedEtcdTimeoutNotReadySuccess(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
tt := newSpecChangedTest(t)
tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
tt.oldClusterConfig.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdReady(tt.ctx, mCluster, "1h0m0s", clusterName)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdNotReady(tt.ctx, mCluster, "1m", clusterName).Return(errors.New("timed out waiting for the condition on clusters"))
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", clusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, wCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(8)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, wCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, wCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, tt.cluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeSelfManagedClusterWithUnstackedEtcdNotReadyError(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
tt := newSpecChangedTest(t)
tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
tt.oldClusterConfig.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdNotReady(tt.ctx, mCluster, "1m", clusterName).Return(errors.New("etcd not ready"))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.Expect(tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider)).To(MatchError(ContainSubstring("etcd not ready")))
}
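// If the external etcd ready wait times out and the follow-up annotation removal also fails,
// UpgradeCluster should surface the annotation-removal error.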
func TestClusterManagerUpgradeSelfManagedClusterWithUnstackedEtcdErrorRemovingAnnotation(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
tt := newSpecChangedTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
tt.oldClusterConfig.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().WaitForManagedExternalEtcdReady(tt.ctx, mCluster, "1h0m0s", clusterName).Return(errors.New("timed out"))
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, gomock.Any(), gomock.Any(), gomock.Any(), mCluster, constants.EksaSystemNamespace).Return(errors.New("removing annotation"))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.Expect(tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider)).To(MatchError(ContainSubstring("removing annotation")))
}
func TestClusterManagerUpgradeWorkloadClusterSuccess(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, mCluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
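// When the cluster spec references an AWSIamConfig, upgrading the workload cluster should
// also upgrade aws-iam-authenticator.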
func TestClusterManagerUpgradeWorkloadClusterAWSIamConfigSuccess(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
// Adding AWSIamConfig to the cluster spec.
oldIamConfig := &v1alpha1.AWSIamConfig{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
}
tt.oldClusterConfig.Spec.IdentityProviderRefs = []v1alpha1.Ref{{Kind: v1alpha1.AWSIamConfigKind, Name: oldIamConfig.Name}}
tt.newClusterConfig = tt.oldClusterConfig.DeepCopy()
tt.clusterSpec = test.NewClusterSpecForConfig(t,
&cluster.Config{
Cluster: tt.newClusterConfig,
AWSIAMConfigs: map[string]*v1alpha1.AWSIamConfig{
oldIamConfig.Name: oldIamConfig,
},
},
)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.client.EXPECT().GetEksaAWSIamConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(oldIamConfig, nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
tt.mocks.awsIamAuth.EXPECT().UpgradeAWSIAMAuth(tt.ctx, wCluster, tt.clusterSpec).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeCloudStackWorkloadClusterSuccess(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, mCluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeWorkloadClusterWaitForMDReadyErrorOnce(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
// Fail once
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Times(1).Return(0, 0, errors.New("error counting MD replicas"))
// Return 1 and 1 for ready and total replicas
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Times(1).Return(1, 1, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, mCluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeWorkloadClusterWaitForMDReadyUnreadyOnce(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
// Return 0 and 1 for ready and total replicas once
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Times(1).Return(0, 1, nil)
// Return 1 and 1 for ready and total replicas
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, mCluster.Name, mCluster.KubeconfigFile).Times(1).Return(1, 1, nil)
tt.mocks.provider.EXPECT().GetDeployments()
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, mCluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.UpgradeCluster() error = %v, wantErr nil", err)
}
}
func TestClusterManagerUpgradeWorkloadClusterWaitForMachinesTimeout(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
tt := newSpecChangedTest(t, clustermanager.WithMachineBackoff(1*time.Nanosecond), clustermanager.WithMachineMaxWait(50*time.Microsecond), clustermanager.WithMachineMinWait(100*time.Microsecond))
kcp, _ := getKcpAndMdsForNodeCount(1)
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(ctx, mCluster, "1h0m0s", clusterName)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
// Fail once
tt.mocks.client.EXPECT().GetMachines(ctx, mCluster, mCluster.Name).Times(1).Return(nil, errors.New("error get machines"))
	// Return a machine with no nodeRef for the rest of the retries
tt.mocks.client.EXPECT().GetMachines(ctx, mCluster, mCluster.Name).MinTimes(1).Return([]types.Machine{{Metadata: types.MachineMetadata{
Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""},
}}}, nil)
if err := tt.clusterManager.UpgradeCluster(ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err == nil {
t.Error("ClusterManager.UpgradeCluster() error = nil, wantErr not nil")
}
}
func TestClusterManagerUpgradeWorkloadClusterGetMachineDeploymentError(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
kcp, _ := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(nil, errors.New("get md err"))
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, mCluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
tt.Expect(tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider)).To(MatchError(ContainSubstring("md err")))
}
func TestClusterManagerUpgradeWorkloadClusterRemoveOldWorkerNodeGroupsError(t *testing.T) {
mgmtClusterName := "cluster-name"
workClusterName := "cluster-name-w"
mCluster := &types.Cluster{
Name: mgmtClusterName,
ExistingManagement: true,
}
wCluster := &types.Cluster{
Name: workClusterName,
}
tt := newSpecChangedTest(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, mCluster, mgmtClusterName).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, mCluster.KubeconfigFile, mCluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, mCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", mgmtClusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", mgmtClusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile).Return(errors.New("delete wng error"))
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, mCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, mCluster.Name).Return(nil)
tt.mocks.writer.EXPECT().Write(mgmtClusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, mCluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
tt.Expect(tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider)).To(MatchError(ContainSubstring("wng err")))
}
func TestClusterManagerUpgradeWorkloadClusterWaitForMachinesFailedWithUnhealthyNode(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
status := types.MachineStatus{
NodeRef: &types.ResourceRef{},
Conditions: types.Conditions{
{
Type: "NodeHealthy",
Status: "False",
},
},
}
machines := []types.Machine{
{Metadata: types.MachineMetadata{Labels: map[string]string{clusterv1.MachineControlPlaneNameLabel: ""}}, Status: status},
}
tt := newSpecChangedTest(t, clustermanager.WithMachineBackoff(1*time.Nanosecond), clustermanager.WithMachineMaxWait(50*time.Microsecond), clustermanager.WithMachineMinWait(100*time.Microsecond))
kcp, _ := getKcpAndMdsForNodeCount(1)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", clusterName).MaxTimes(5)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, wCluster, "30m", "Available", gomock.Any(), gomock.Any()).MaxTimes(10)
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
	// Return a machine whose node reports a NodeHealthy=False condition for all retries
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).MinTimes(1).Return(machines, nil)
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err == nil {
t.Error("ClusterManager.UpgradeCluster() error = nil, wantErr not nil")
}
}
func TestClusterManagerUpgradeWorkloadClusterWaitForCAPITimeout(t *testing.T) {
clusterName := "cluster-name"
mCluster := &types.Cluster{
Name: clusterName,
}
wCluster := &types.Cluster{
Name: clusterName,
}
tt := newSpecChangedTest(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.provider.EXPECT().GenerateCAPISpecForUpgrade(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.clusterSpec.DeepCopy())
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(tt.ctx, mCluster, test.OfType("[]uint8"), constants.EksaSystemNamespace).Times(2)
tt.mocks.provider.EXPECT().RunPostControlPlaneUpgrade(tt.ctx, tt.clusterSpec, tt.clusterSpec, wCluster, mCluster)
tt.mocks.client.EXPECT().WaitForControlPlaneReady(tt.ctx, mCluster, "1h0m0s", clusterName).MaxTimes(2)
tt.mocks.client.EXPECT().WaitForControlPlaneNotReady(tt.ctx, mCluster, "1m", clusterName)
tt.mocks.client.EXPECT().GetKubeadmControlPlane(tt.ctx,
mCluster,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
tt.mocks.client.EXPECT().GetMachineDeploymentsForCluster(tt.ctx,
mCluster.Name,
gomock.AssignableToTypeOf(executables.WithCluster(mCluster)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
tt.mocks.client.EXPECT().GetMachines(tt.ctx, mCluster, mCluster.Name).Return([]types.Machine{}, nil).Times(2)
tt.mocks.client.EXPECT().GetMachineDeployment(tt.ctx, "cluster-name-md-0", gomock.AssignableToTypeOf(executables.WithKubeconfig(mCluster.KubeconfigFile)), gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace))).Return(&mds[0], nil)
tt.mocks.client.EXPECT().DeleteOldWorkerNodeGroup(tt.ctx, &mds[0], mCluster.KubeconfigFile)
tt.mocks.client.EXPECT().WaitForDeployment(tt.ctx, wCluster, "30m0s", "Available", gomock.Any(), gomock.Any()).Return(errors.New("time out"))
tt.mocks.client.EXPECT().ValidateControlPlaneNodes(tt.ctx, mCluster, wCluster.Name).Return(nil)
tt.mocks.client.EXPECT().CountMachineDeploymentReplicasReady(tt.ctx, wCluster.Name, mCluster.KubeconfigFile).Return(0, 0, nil)
tt.mocks.writer.EXPECT().Write(clusterName+"-eks-a-cluster.yaml", gomock.Any(), gomock.Not(gomock.Nil()))
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(nil, nil)
tt.mocks.networking.EXPECT().RunPostControlPlaneUpgradeSetup(tt.ctx, wCluster).Return(nil)
if err := tt.clusterManager.UpgradeCluster(tt.ctx, mCluster, wCluster, tt.clusterSpec, tt.mocks.provider); err == nil {
t.Error("ClusterManager.UpgradeCluster() error = nil, wantErr not nil")
}
}
func TestClusterManagerBackupCAPISuccess(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
ctx := context.Background()
c, m := newClusterManager(t)
m.client.EXPECT().BackupManagement(ctx, from, managementStatePath)
if err := c.BackupCAPI(ctx, from, managementStatePath); err != nil {
t.Errorf("ClusterManager.BackupCAPI() error = %v, wantErr nil", err)
}
}
func TestClusterManagerBackupCAPIRetrySuccess(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
ctx := context.Background()
c, m := newClusterManager(t)
firstTry := m.client.EXPECT().BackupManagement(ctx, from, managementStatePath).Return(errors.New("Error: failed to connect to the management cluster: action failed after 9 attempts: Get \"https://127.0.0.1:61994/api?timeout=30s\": EOF"))
secondTry := m.client.EXPECT().BackupManagement(ctx, from, managementStatePath).Return(nil)
gomock.InOrder(
firstTry,
secondTry,
)
if err := c.BackupCAPI(ctx, from, managementStatePath); err != nil {
t.Errorf("ClusterManager.BackupCAPI() error = %v, wantErr nil", err)
}
}
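// TestClusterctlWaitRetryPolicy verifies that ClusterctlMoveRetryPolicy returns the expected
// backoff for retriable network errors and does not retry on other errors.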
func TestClusterctlWaitRetryPolicy(t *testing.T) {
connectionRefusedError := fmt.Errorf("Error: failed to connect to the management cluster: action failed after 9 attempts: Get \"https://127.0.0.1:53733/api?timeout=30s\": dial tcp 127.0.0.1:53733: connect: connection refused")
ioTimeoutError := fmt.Errorf("Error: failed to connect to the management cluster: action failed after 9 attempts: Get \"https://127.0.0.1:61994/api?timeout=30s\": net/http: TLS handshake timeout")
miscellaneousError := fmt.Errorf("Some other random miscellaneous error")
_, wait := clustermanager.ClusterctlMoveRetryPolicy(1, connectionRefusedError)
if wait != 10*time.Second {
t.Errorf("ClusterctlMoveRetryPolicy didn't correctly calculate first retry wait for connection refused")
}
_, wait = clustermanager.ClusterctlMoveRetryPolicy(-1, connectionRefusedError)
if wait != 10*time.Second {
t.Errorf("ClusterctlMoveRetryPolicy didn't correctly protect for total retries < 0")
}
_, wait = clustermanager.ClusterctlMoveRetryPolicy(2, connectionRefusedError)
if wait != 15*time.Second {
t.Errorf("ClusterctlMoveRetryPolicy didn't correctly protect for second retry wait")
}
_, wait = clustermanager.ClusterctlMoveRetryPolicy(1, ioTimeoutError)
if wait != 10*time.Second {
t.Errorf("ClusterctlMoveRetryPolicy didn't correctly calculate first retry wait for ioTimeout")
}
	retry, _ := clustermanager.ClusterctlMoveRetryPolicy(1, miscellaneousError)
	if retry {
		t.Errorf("ClusterctlMoveRetryPolicy retried on a non-network error")
	}
}
func TestClusterManagerBackupCAPIError(t *testing.T) {
from := &types.Cluster{}
ctx := context.Background()
c, m := newClusterManager(t)
m.client.EXPECT().BackupManagement(ctx, from, managementStatePath).Return(errors.New("backing up CAPI resources"))
	if err := c.BackupCAPI(ctx, from, managementStatePath); err == nil {
		t.Error("ClusterManager.BackupCAPI() error = nil, wantErr not nil")
	}
}
func TestClusterManagerMoveCAPISuccess(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-wn"}}}
})
ctx := context.Background()
c, m := newClusterManager(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, from, to.Name)
m.client.EXPECT().WaitForClusterReady(ctx, from, "1h0m0s", to.Name)
m.client.EXPECT().MoveManagement(ctx, from, to, to.Name)
m.client.EXPECT().WaitForControlPlaneReady(ctx, to, "15m0s", to.Name)
m.client.EXPECT().ValidateControlPlaneNodes(ctx, to, to.Name)
m.client.EXPECT().CountMachineDeploymentReplicasReady(ctx, to.Name, to.KubeconfigFile)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
to,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, to, to.Name)
if err := c.MoveCAPI(ctx, from, to, to.Name, clusterSpec); err != nil {
t.Errorf("ClusterManager.MoveCAPI() error = %v, wantErr nil", err)
}
}
func TestClusterManagerMoveCAPIRetrySuccess(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-wn"}}}
})
ctx := context.Background()
c, m := newClusterManager(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, from, to.Name)
m.client.EXPECT().WaitForClusterReady(ctx, from, "1h0m0s", to.Name)
firstTry := m.client.EXPECT().MoveManagement(ctx, from, to, to.Name).Return(errors.New("Error: failed to connect to the management cluster: action failed after 9 attempts: Get \"https://127.0.0.1:61994/api?timeout=30s\": EOF"))
secondTry := m.client.EXPECT().MoveManagement(ctx, from, to, to.Name).Return(nil)
gomock.InOrder(
firstTry,
secondTry,
)
m.client.EXPECT().WaitForControlPlaneReady(ctx, to, "15m0s", to.Name)
m.client.EXPECT().ValidateControlPlaneNodes(ctx, to, to.Name)
m.client.EXPECT().CountMachineDeploymentReplicasReady(ctx, to.Name, to.KubeconfigFile)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
to,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
to.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, to, to.Name)
if err := c.MoveCAPI(ctx, from, to, to.Name, clusterSpec); err != nil {
t.Errorf("ClusterManager.MoveCAPI() error = %v, wantErr nil", err)
}
}
func TestClusterManagerMoveCAPIErrorMove(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
ctx := context.Background()
c, m := newClusterManager(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, from, from.Name)
m.client.EXPECT().WaitForClusterReady(ctx, from, "1h0m0s", from.Name)
m.client.EXPECT().MoveManagement(ctx, from, to, from.Name).Return(errors.New("error moving"))
if err := c.MoveCAPI(ctx, from, to, from.Name, clusterSpec); err == nil {
t.Error("ClusterManager.MoveCAPI() error = nil, wantErr not nil")
}
}
func TestClusterManagerMoveCAPIErrorWaitForClusterReady(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
ctx := context.Background()
c, m := newClusterManager(t)
kcp, mds := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, from, from.Name)
m.client.EXPECT().WaitForClusterReady(ctx, from, "1h0m0s", from.Name).Return(errors.New("error waiting for cluster to be ready"))
if err := c.MoveCAPI(ctx, from, to, from.Name, clusterSpec); err == nil {
t.Error("ClusterManager.MoveCAPI() error = nil, wantErr not nil")
}
}
func TestClusterManagerMoveCAPIErrorWaitForControlPlane(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})
ctx := context.Background()
c, m := newClusterManager(t)
m.client.EXPECT().WaitForClusterReady(ctx, from, "1h0m0s", from.Name)
m.client.EXPECT().MoveManagement(ctx, from, to, from.Name)
kcp, mds := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, from, from.Name)
m.client.EXPECT().WaitForControlPlaneReady(ctx, to, "15m0s", from.Name).Return(errors.New("error waiting for control plane"))
if err := c.MoveCAPI(ctx, from, to, from.Name, clusterSpec); err == nil {
t.Error("ClusterManager.MoveCAPI() error = nil, wantErr not nil")
}
}
func TestClusterManagerMoveCAPIErrorGetMachines(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-wn"}}}
})
ctx := context.Background()
c, m := newClusterManager(t, clustermanager.WithMachineBackoff(0), clustermanager.WithMachineMaxWait(10*time.Microsecond), clustermanager.WithMachineMinWait(20*time.Microsecond))
kcp, mds := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, from, from.Name)
m.client.EXPECT().WaitForClusterReady(ctx, from, "1h0m0s", from.Name)
m.client.EXPECT().MoveManagement(ctx, from, to, from.Name)
m.client.EXPECT().WaitForControlPlaneReady(ctx, to, "15m0s", from.Name)
m.client.EXPECT().ValidateControlPlaneNodes(ctx, to, to.Name)
m.client.EXPECT().CountMachineDeploymentReplicasReady(ctx, to.Name, to.KubeconfigFile)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
to,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(mds, nil)
m.client.EXPECT().GetMachines(ctx, to, from.Name).Return(nil, errors.New("error getting machines")).AnyTimes()
if err := c.MoveCAPI(ctx, from, to, from.Name, clusterSpec); err == nil {
t.Error("ClusterManager.MoveCAPI() error = nil, wantErr not nil")
}
}
func TestClusterManagerMoveCAPIErrorGetKubeadmControlPlane(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-wn"}}}
})
ctx := context.Background()
c, m := newClusterManager(t, clustermanager.WithMachineBackoff(0), clustermanager.WithMachineMaxWait(10*time.Microsecond), clustermanager.WithMachineMinWait(20*time.Microsecond))
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(nil, errors.New("error getting KubeadmControlPlane"))
if err := c.MoveCAPI(ctx, from, to, from.Name, clusterSpec); err == nil {
t.Error("ClusterManager.MoveCAPI() error = nil, wantErr not nil")
}
}
func TestClusterManagerMoveCAPIErrorGetMachineDeploymentsForCluster(t *testing.T) {
from := &types.Cluster{
Name: "from-cluster",
}
to := &types.Cluster{
Name: "to-cluster",
}
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = to.Name
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{{Count: ptr.Int(3), MachineGroupRef: &v1alpha1.Ref{Name: "test-wn"}}}
})
ctx := context.Background()
c, m := newClusterManager(t, clustermanager.WithMachineBackoff(0), clustermanager.WithMachineMaxWait(10*time.Microsecond), clustermanager.WithMachineMinWait(20*time.Microsecond))
kcp, _ := getKcpAndMdsForNodeCount(0)
m.client.EXPECT().GetKubeadmControlPlane(ctx,
from,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(kcp, nil)
m.client.EXPECT().GetMachineDeploymentsForCluster(ctx,
from.Name,
gomock.AssignableToTypeOf(executables.WithCluster(from)),
gomock.AssignableToTypeOf(executables.WithNamespace(constants.EksaSystemNamespace)),
).Return(nil, errors.New("error getting MachineDeployments"))
if err := c.MoveCAPI(ctx, from, to, from.Name, clusterSpec); err == nil {
t.Error("ClusterManager.MoveCAPI() error = nil, wantErr not nil")
}
}
func TestClusterManagerCreateEKSAResourcesSuccess(t *testing.T) {
features.ClearCache()
ctx := context.Background()
tt := newTest(t)
tt.clusterSpec.VersionsBundle.EksD.Components = "testdata/eksa_components.yaml"
tt.clusterSpec.VersionsBundle.EksD.EksDReleaseUrl = "testdata/eksa_components.yaml"
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
machineConfigs := []providers.MachineConfig{}
c, m := newClusterManager(t)
m.client.EXPECT().ApplyKubeSpecFromBytesForce(ctx, tt.cluster, gomock.Any())
m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, gomock.Any())
m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, tt.cluster, gomock.Any(), gomock.Any()).MaxTimes(2)
tt.Expect(c.CreateEKSAResources(ctx, tt.cluster, tt.clusterSpec, datacenterConfig, machineConfigs)).To(Succeed())
_, ok := datacenterConfig.GetAnnotations()["anywhere.eks.amazonaws.com/paused"]
tt.Expect(ok).To(BeTrue())
_, ok = tt.clusterSpec.Cluster.GetAnnotations()["anywhere.eks.amazonaws.com/paused"]
tt.Expect(ok).To(BeTrue())
}
func TestClusterManagerCreateEKSAResourcesFailure(t *testing.T) {
features.ClearCache()
ctx := context.Background()
tt := newTest(t)
tt.clusterSpec.VersionsBundle.EksD.Components = "testdata/eksa_components.yaml"
tt.clusterSpec.VersionsBundle.EksD.EksDReleaseUrl = "testdata/eksa_components.yaml"
tt.clusterSpec.Cluster.Namespace = "test_namespace"
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
machineConfigs := []providers.MachineConfig{}
c, m := newClusterManager(t)
m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(errors.New(""))
tt.Expect(c.CreateEKSAResources(ctx, tt.cluster, tt.clusterSpec, datacenterConfig, machineConfigs)).NotTo(Succeed())
}
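// expectedMachineHealthCheck renders the MachineHealthCheck manifests (worker node group and
// control plane) the tests expect to be applied, using the given unhealthy machine and node
// startup timeouts.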
func expectedMachineHealthCheck(unhealthyMachineTimeout, nodeStartupTimeout time.Duration) []byte {
healthCheck := fmt.Sprintf(`apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
creationTimestamp: null
name: fluxTestCluster-worker-1-worker-unhealthy
namespace: eksa-system
spec:
clusterName: fluxTestCluster
maxUnhealthy: 40%%
nodeStartupTimeout: %[2]s
selector:
matchLabels:
cluster.x-k8s.io/deployment-name: fluxTestCluster-worker-1
unhealthyConditions:
- status: Unknown
timeout: %[1]s
type: Ready
- status: "False"
timeout: %[1]s
type: Ready
status:
currentHealthy: 0
expectedMachines: 0
remediationsAllowed: 0
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
creationTimestamp: null
name: fluxTestCluster-kcp-unhealthy
namespace: eksa-system
spec:
clusterName: fluxTestCluster
maxUnhealthy: 100%%
nodeStartupTimeout: %[2]s
selector:
matchLabels:
cluster.x-k8s.io/control-plane: ""
unhealthyConditions:
- status: Unknown
timeout: %[1]s
type: Ready
- status: "False"
timeout: %[1]s
type: Ready
status:
currentHealthy: 0
expectedMachines: 0
remediationsAllowed: 0
---
`, unhealthyMachineTimeout, nodeStartupTimeout)
return []byte(healthCheck)
}
func TestInstallMachineHealthChecks(t *testing.T) {
ctx := context.Background()
tt := newTest(t)
tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name = "worker-1"
wantMHC := expectedMachineHealthCheck(clustermanager.DefaultUnhealthyMachineTimeout, clustermanager.DefaultNodeStartupTimeout)
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, wantMHC)
if err := tt.clusterManager.InstallMachineHealthChecks(ctx, tt.clusterSpec, tt.cluster); err != nil {
t.Errorf("ClusterManager.InstallMachineHealthChecks() error = %v, wantErr nil", err)
}
}
func TestInstallMachineHealthChecksWithTimeoutOverride(t *testing.T) {
ctx := context.Background()
tt := newTest(t, clustermanager.WithUnhealthyMachineTimeout(30*time.Minute), clustermanager.WithNodeStartupTimeout(20*time.Minute))
tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name = "worker-1"
wantMHC := expectedMachineHealthCheck(30*time.Minute, 20*time.Minute)
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, wantMHC)
if err := tt.clusterManager.InstallMachineHealthChecks(ctx, tt.clusterSpec, tt.cluster); err != nil {
t.Errorf("ClusterManager.InstallMachineHealthChecks() error = %v, wantErr nil", err)
}
}
func TestInstallMachineHealthChecksWithNoTimeout(t *testing.T) {
tt := newTest(t, clustermanager.WithNoTimeouts())
tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name = "worker-1"
wantMHC := expectedMachineHealthCheck(maxTime, maxTime)
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, wantMHC)
tt.Expect(tt.clusterManager.InstallMachineHealthChecks(tt.ctx, tt.clusterSpec, tt.cluster)).To(Succeed())
}
func TestInstallMachineHealthChecksApplyError(t *testing.T) {
ctx := context.Background()
tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(2, 0)))
tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].Name = "worker-1"
wantMHC := expectedMachineHealthCheck(clustermanager.DefaultUnhealthyMachineTimeout, clustermanager.DefaultNodeStartupTimeout)
tt.mocks.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, wantMHC).Return(errors.New("apply error")).MaxTimes(2)
if err := tt.clusterManager.InstallMachineHealthChecks(ctx, tt.clusterSpec, tt.cluster); err == nil {
t.Error("ClusterManager.InstallMachineHealthChecks() error = nil, wantErr apply error")
}
}
func TestPauseEKSAControllerReconcileWorkloadCluster(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster",
},
},
}
tt.expectPauseClusterReconciliation()
tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed())
}
func TestPauseEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *testing.T) {
tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster",
},
},
}
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType)
tt.mocks.provider.EXPECT().MachineResourceType().Return("")
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(errors.New("pause eksa cluster error"))
tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed())
}
func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.clusterName,
},
},
}
tt.mocks.client.EXPECT().
ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}).
DoAndReturn(func(_ context.Context, _, _, _ string, obj *v1alpha1.ClusterList) error {
obj.Items = []v1alpha1.Cluster{
*tt.clusterSpec.Cluster,
{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-1",
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.clusterName,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-2",
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster-2",
},
},
},
}
return nil
})
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType).Times(2)
tt.mocks.provider.EXPECT().MachineResourceType().Return("").Times(2)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil).Times(2)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
tt.clusterSpec.Cluster.Name,
map[string]string{
v1alpha1.ManagedByCLIAnnotation: "true",
},
tt.cluster,
"",
).Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, "workload-cluster-1", expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
"workload-cluster-1",
map[string]string{
v1alpha1.ManagedByCLIAnnotation: "true",
},
tt.cluster,
"",
).Return(nil)
tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed())
}
func TestPauseEKSAControllerReconcileManagementClusterListObjectsError(t *testing.T) {
tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.clusterName,
},
},
}
tt.mocks.client.EXPECT().ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}).Return(errors.New("list error"))
tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed())
}
func TestPauseEKSAControllerReconcileWorkloadClusterWithMachineConfig(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "datasourcename",
},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{
Name: tt.clusterName + "-cp",
},
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{{
MachineGroupRef: &v1alpha1.Ref{
Name: tt.clusterName,
},
}},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster",
},
},
}
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType)
tt.mocks.provider.EXPECT().MachineResourceType().Return(eksaVSphereMachineResourceType).Times(3)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereMachineResourceType, tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereMachineResourceType, tt.clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
tt.clusterSpec.Cluster.Name,
map[string]string{
v1alpha1.ManagedByCLIAnnotation: "true",
},
tt.cluster,
"",
).Return(nil)
tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed())
}
func TestResumeEKSAControllerReconcileWorkloadCluster(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster",
},
},
}
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Insecure: true,
},
}
pauseAnnotation := "anywhere.eks.amazonaws.com/paused"
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType)
tt.mocks.provider.EXPECT().MachineResourceType().Return("")
tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
tt.clusterSpec.Cluster.Name,
v1alpha1.ManagedByCLIAnnotation,
tt.cluster,
"",
).Return(nil)
tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed())
}
func TestResumeEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *testing.T) {
tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster",
},
},
}
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Insecure: true,
},
}
pauseAnnotation := "anywhere.eks.amazonaws.com/paused"
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType)
tt.mocks.provider.EXPECT().MachineResourceType().Return("")
tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(errors.New("pause eksa cluster error"))
tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed())
}
func TestResumeEKSAControllerReconcileManagementCluster(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.clusterName,
},
},
}
tt.clusterSpec.Cluster.PauseReconcile()
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Insecure: true,
},
}
pauseAnnotation := "anywhere.eks.amazonaws.com/paused"
tt.mocks.client.EXPECT().
ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}).
DoAndReturn(func(_ context.Context, _, _, _ string, obj *v1alpha1.ClusterList) error {
obj.Items = []v1alpha1.Cluster{
*tt.clusterSpec.Cluster,
{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-1",
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.clusterName,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "workload-cluster-2",
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: "data-center-name",
},
ManagementCluster: v1alpha1.ManagementCluster{
Name: "mgmt-cluster-2",
},
},
},
}
return nil
})
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType).Times(2)
tt.mocks.provider.EXPECT().MachineResourceType().Return("").Times(2)
tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil).Times(2)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
tt.clusterSpec.Cluster.Name,
v1alpha1.ManagedByCLIAnnotation,
tt.cluster,
"",
).Return(nil)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, "workload-cluster-1", pauseAnnotation, tt.cluster, "").Return(nil)
tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
"workload-cluster-1",
v1alpha1.ManagedByCLIAnnotation,
tt.cluster,
"",
).Return(nil)
tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed())
annotations := tt.clusterSpec.Cluster.GetAnnotations()
if _, ok := annotations[pauseAnnotation]; ok {
t.Errorf("%s annotation exists, should be removed", pauseAnnotation)
}
if _, ok := annotations[v1alpha1.ManagedByCLIAnnotation]; ok {
t.Errorf("%s annotation exists, should be removed", v1alpha1.ManagedByCLIAnnotation)
}
}
func TestResumeEKSAControllerReconcileManagementClusterListObjectsError(t *testing.T) {
tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0)))
tt.clusterSpec.Cluster = &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.ClusterSpec{
ManagementCluster: v1alpha1.ManagementCluster{
Name: tt.clusterName,
},
},
}
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: tt.clusterName,
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Insecure: true,
},
}
tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig)
tt.mocks.client.EXPECT().ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}).Return(errors.New("list error"))
tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed())
}
func TestClusterManagerInstallCustomComponentsSuccess(t *testing.T) {
features.ClearCache()
tt := newTest(t)
tt.mocks.eksaComponents.EXPECT().Install(tt.ctx, logger.Get(), tt.cluster, tt.clusterSpec)
tt.mocks.provider.EXPECT().InstallCustomProviderComponents(tt.ctx, tt.cluster.KubeconfigFile)
if err := tt.clusterManager.InstallCustomComponents(tt.ctx, tt.clusterSpec, tt.cluster, tt.mocks.provider); err != nil {
t.Errorf("ClusterManager.InstallCustomComponents() error = %v, wantErr nil", err)
}
}
func TestClusterManagerInstallCustomComponentsErrorInstalling(t *testing.T) {
tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(2, 0)))
tt.mocks.eksaComponents.EXPECT().Install(tt.ctx, logger.Get(), tt.cluster, tt.clusterSpec).Return(errors.New("error from apply"))
if err := tt.clusterManager.InstallCustomComponents(tt.ctx, tt.clusterSpec, tt.cluster, nil); err == nil {
t.Error("ClusterManager.InstallCustomComponents() error = nil, wantErr not nil")
}
}
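// specChangedTest extends testSetup with old and new copies of the cluster, datacenter,
// machine and OIDC configs used by the spec-changed tests.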
type specChangedTest struct {
*testSetup
oldClusterConfig, newClusterConfig *v1alpha1.Cluster
oldDatacenterConfig, newDatacenterConfig *v1alpha1.VSphereDatacenterConfig
oldControlPlaneMachineConfig, newControlPlaneMachineConfig *v1alpha1.VSphereMachineConfig
oldWorkerMachineConfig, newWorkerMachineConfig *v1alpha1.VSphereMachineConfig
oldOIDCConfig *v1alpha1.OIDCConfig
}
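// newSpecChangedTest builds a specChangedTest with a baseline vSphere-backed cluster config and
// deep copies of it that individual tests can mutate to simulate spec changes.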
func newSpecChangedTest(t *testing.T, opts ...clustermanager.ClusterManagerOpt) *specChangedTest {
testSetup := newTest(t, opts...)
clusterName := testSetup.clusterName
clusterConfig := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: v1alpha1.ClusterSpec{
KubernetesVersion: "1.19",
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
Count: 1,
Endpoint: &v1alpha1.Endpoint{
Host: "1.1.1.1",
},
MachineGroupRef: &v1alpha1.Ref{
Name: clusterName,
},
},
WorkerNodeGroupConfigurations: []v1alpha1.WorkerNodeGroupConfiguration{{
Count: ptr.Int(1),
MachineGroupRef: &v1alpha1.Ref{
Name: clusterName + "-worker",
},
}},
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.VSphereDatacenterKind,
Name: clusterName,
},
IdentityProviderRefs: []v1alpha1.Ref{{
Kind: v1alpha1.OIDCConfigKind,
Name: clusterName,
}},
},
}
newClusterConfig := clusterConfig.DeepCopy()
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Insecure: true,
},
}
machineConfig := &v1alpha1.VSphereMachineConfig{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: v1alpha1.VSphereMachineConfigSpec{
DiskGiB: 20,
MemoryMiB: 8192,
NumCPUs: 2,
},
}
workerMachineConfig := machineConfig.DeepCopy()
workerMachineConfig.Name = clusterConfig.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name
oidcConfig := &v1alpha1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
Spec: v1alpha1.OIDCConfigSpec{
ClientId: "test",
},
}
changedTest := &specChangedTest{
testSetup: testSetup,
oldClusterConfig: clusterConfig,
newClusterConfig: newClusterConfig,
oldDatacenterConfig: datacenterConfig,
newDatacenterConfig: datacenterConfig.DeepCopy(),
oldControlPlaneMachineConfig: machineConfig,
newControlPlaneMachineConfig: machineConfig.DeepCopy(),
oldWorkerMachineConfig: workerMachineConfig,
newWorkerMachineConfig: workerMachineConfig.DeepCopy(),
oldOIDCConfig: oidcConfig,
}
changedTest.clusterSpec = test.NewClusterSpecForCluster(t, newClusterConfig)
return changedTest
}
func TestClusterManagerClusterSpecChangedNoChanges(t *testing.T) {
tt := newSpecChangedTest(t)
tt.clusterSpec.Cluster.Spec.IdentityProviderRefs = []v1alpha1.Ref{{Kind: v1alpha1.OIDCConfigKind, Name: tt.clusterName}}
tt.clusterSpec.OIDCConfig = tt.oldOIDCConfig.DeepCopy()
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.oldClusterConfig.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(tt.oldOIDCConfig, nil)
diff, err := tt.clusterManager.EKSAClusterSpecChanged(tt.ctx, tt.cluster, tt.clusterSpec)
assert.Nil(t, err, "Error should be nil")
assert.False(t, diff, "No changes should have been detected")
}
func TestClusterManagerClusterSpecChangedClusterChanged(t *testing.T) {
tt := newSpecChangedTest(t)
tt.newClusterConfig.Spec.KubernetesVersion = "1.20"
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
diff, err := tt.clusterManager.EKSAClusterSpecChanged(tt.ctx, tt.cluster, tt.clusterSpec)
assert.Nil(t, err, "Error should be nil")
assert.True(t, diff, "Changes should have been detected")
}
func TestClusterManagerClusterSpecChangedEksDReleaseChanged(t *testing.T) {
tt := newSpecChangedTest(t)
tt.clusterSpec.VersionsBundle.EksD.Name = "kubernetes-1-19-eks-5"
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(tt.oldOIDCConfig, nil)
diff, err := tt.clusterManager.EKSAClusterSpecChanged(tt.ctx, tt.cluster, tt.clusterSpec)
assert.Nil(t, err, "Error should be nil")
assert.True(t, diff, "Changes should have been detected")
}
func TestClusterManagerClusterSpecChangedGitOpsDefault(t *testing.T) {
tt := newSpecChangedTest(t)
tt.clusterSpec.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{Kind: v1alpha1.GitOpsConfigKind}
tt.oldClusterConfig = tt.clusterSpec.Cluster.DeepCopy()
oldGitOpsConfig := tt.clusterSpec.GitOpsConfig.DeepCopy()
tt.clusterSpec.Cluster.Spec.IdentityProviderRefs = []v1alpha1.Ref{{Kind: v1alpha1.OIDCConfigKind, Name: tt.clusterName}}
tt.clusterSpec.OIDCConfig = tt.oldOIDCConfig.DeepCopy()
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetEksaGitOpsConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.GitOpsRef.Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(oldGitOpsConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.client.EXPECT().GetEksaOIDCConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(tt.oldOIDCConfig, nil)
diff, err := tt.clusterManager.EKSAClusterSpecChanged(tt.ctx, tt.cluster, tt.clusterSpec)
assert.Nil(t, err, "Error should be nil")
assert.False(t, diff, "No changes should have been detected")
}
func TestClusterManagerClusterSpecChangedAWSIamConfigChanged(t *testing.T) {
tt := newSpecChangedTest(t)
tt.clusterSpec.Cluster.Spec.IdentityProviderRefs = []v1alpha1.Ref{{Kind: v1alpha1.AWSIamConfigKind, Name: tt.clusterName}}
tt.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{}
tt.oldClusterConfig = tt.clusterSpec.Cluster.DeepCopy()
oldIamConfig := tt.clusterSpec.AWSIamConfig.DeepCopy()
tt.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{Spec: v1alpha1.AWSIamConfigSpec{
MapRoles: []v1alpha1.MapRoles{},
}}
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.cluster.Name, "").Return(test.Bundles(t), nil)
tt.mocks.client.EXPECT().GetEksdRelease(tt.ctx, gomock.Any(), constants.EksaSystemNamespace, gomock.Any()).Return(test.EksdRelease(), nil)
tt.mocks.client.EXPECT().GetEksaAWSIamConfig(tt.ctx, tt.clusterSpec.Cluster.Spec.IdentityProviderRefs[0].Name, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Namespace).Return(oldIamConfig, nil)
diff, err := tt.clusterManager.EKSAClusterSpecChanged(tt.ctx, tt.cluster, tt.clusterSpec)
assert.Nil(t, err, "Error should be nil")
assert.True(t, diff, "Changes should have been detected")
}
type testSetup struct {
*WithT
clusterManager *clustermanager.ClusterManager
mocks *clusterManagerMocks
ctx context.Context
clusterSpec *cluster.Spec
cluster *types.Cluster
clusterName string
}
func (tt *testSetup) expectPauseClusterReconciliation() *gomock.Call {
lastCall := tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(
tt.ctx,
eksaClusterResourceType,
tt.clusterSpec.Cluster.Name,
map[string]string{
v1alpha1.ManagedByCLIAnnotation: "true",
},
tt.cluster,
"",
).Return(nil)
gomock.InOrder(
tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType),
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil),
tt.mocks.provider.EXPECT().MachineResourceType().Return(""),
tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil),
lastCall,
)
return lastCall
}
func newTest(t *testing.T, opts ...clustermanager.ClusterManagerOpt) *testSetup {
c, m := newClusterManager(t, opts...)
clusterName := "cluster-name"
return &testSetup{
WithT: NewWithT(t),
clusterManager: c,
mocks: m,
ctx: context.Background(),
clusterSpec: test.NewClusterSpec(),
cluster: &types.Cluster{
Name: clusterName,
},
clusterName: clusterName,
}
}
type clusterManagerMocks struct {
writer *mockswriter.MockFileWriter
networking *mocksmanager.MockNetworking
awsIamAuth *mocksmanager.MockAwsIamAuth
client *mocksmanager.MockClusterClient
provider *mocksprovider.MockProvider
diagnosticsBundle *mocksdiagnostics.MockDiagnosticBundle
diagnosticsFactory *mocksdiagnostics.MockDiagnosticBundleFactory
eksaComponents *mocksmanager.MockEKSAComponents
}
func newClusterManager(t *testing.T, opts ...clustermanager.ClusterManagerOpt) (*clustermanager.ClusterManager, *clusterManagerMocks) {
mockCtrl := gomock.NewController(t)
m := &clusterManagerMocks{
writer: mockswriter.NewMockFileWriter(mockCtrl),
networking: mocksmanager.NewMockNetworking(mockCtrl),
awsIamAuth: mocksmanager.NewMockAwsIamAuth(mockCtrl),
client: mocksmanager.NewMockClusterClient(mockCtrl),
provider: mocksprovider.NewMockProvider(mockCtrl),
diagnosticsFactory: mocksdiagnostics.NewMockDiagnosticBundleFactory(mockCtrl),
diagnosticsBundle: mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl),
eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl),
}
client := clustermanager.NewRetrierClient(m.client, clustermanager.DefaultRetrier())
c := clustermanager.New(client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents, opts...)
return c, m
}
func TestClusterManagerGetCurrentClusterSpecGetClusterError(t *testing.T) {
tt := newTest(t)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterName).Return(nil, errors.New("error from client"))
_, err := tt.clusterManager.GetCurrentClusterSpec(tt.ctx, tt.cluster, tt.clusterName)
tt.Expect(err).ToNot(BeNil())
}
func TestClusterManagerGetCurrentClusterSpecGetBundlesError(t *testing.T) {
tt := newTest(t)
tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterName).Return(tt.clusterSpec.Cluster, nil)
tt.mocks.client.EXPECT().GetBundles(tt.ctx, tt.cluster.KubeconfigFile, tt.clusterSpec.Cluster.Name, "").Return(nil, errors.New("error from client"))
_, err := tt.clusterManager.GetCurrentClusterSpec(tt.ctx, tt.cluster, tt.clusterName)
tt.Expect(err).ToNot(BeNil())
}
func TestClusterManagerDeletePackageResources(t *testing.T) {
tt := newTest(t)
tt.mocks.client.EXPECT().DeletePackageResources(tt.ctx, tt.cluster, tt.clusterName).Return(nil)
err := tt.clusterManager.DeletePackageResources(tt.ctx, tt.cluster, tt.clusterName)
tt.Expect(err).To(BeNil())
}
func TestCreateAwsIamAuthCaSecretSuccess(t *testing.T) {
tt := newTest(t)
tt.mocks.awsIamAuth.EXPECT().CreateAndInstallAWSIAMAuthCASecret(tt.ctx, tt.cluster, tt.clusterName).Return(nil)
err := tt.clusterManager.CreateAwsIamAuthCaSecret(tt.ctx, tt.cluster, tt.clusterName)
tt.Expect(err).To(BeNil())
}
func TestClusterManagerDeleteClusterSelfManagedCluster(t *testing.T) {
tt := newTest(t)
managementCluster := &types.Cluster{
Name: "m-cluster",
}
tt.mocks.client.EXPECT().DeleteCluster(tt.ctx, managementCluster, tt.cluster)
tt.mocks.provider.EXPECT().PostClusterDeleteValidate(tt.ctx, managementCluster)
tt.Expect(
tt.clusterManager.DeleteCluster(tt.ctx, managementCluster, tt.cluster, tt.mocks.provider, tt.clusterSpec),
).To(Succeed())
}
func TestClusterManagerDeleteClusterManagedCluster(t *testing.T) {
tt := newTest(t)
managementCluster := &types.Cluster{
Name: "m-cluster",
}
tt.clusterSpec.Cluster.SetManagedBy("m-cluster")
tt.clusterSpec.GitOpsConfig = &v1alpha1.GitOpsConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-config-git",
Namespace: "my-ns",
},
}
tt.clusterSpec.OIDCConfig = &v1alpha1.OIDCConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-config-oidc",
Namespace: "my-ns",
},
}
tt.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "my-config-aws",
Namespace: "my-ns",
},
}
gomock.InOrder(
tt.expectPauseClusterReconciliation(),
tt.mocks.client.EXPECT().DeleteEKSACluster(tt.ctx, managementCluster, tt.clusterSpec.Cluster.Name, tt.clusterSpec.Cluster.Namespace),
tt.mocks.client.EXPECT().DeleteGitOpsConfig(tt.ctx, managementCluster, "my-config-git", "my-ns"),
tt.mocks.client.EXPECT().DeleteOIDCConfig(tt.ctx, managementCluster, "my-config-oidc", "my-ns"),
tt.mocks.client.EXPECT().DeleteAWSIamConfig(tt.ctx, managementCluster, "my-config-aws", "my-ns"),
tt.mocks.provider.EXPECT().DeleteResources(tt.ctx, tt.clusterSpec),
tt.mocks.client.EXPECT().DeleteCluster(tt.ctx, managementCluster, tt.cluster),
tt.mocks.provider.EXPECT().PostClusterDeleteValidate(tt.ctx, managementCluster),
)
tt.Expect(
tt.clusterManager.DeleteCluster(tt.ctx, managementCluster, tt.cluster, tt.mocks.provider, tt.clusterSpec),
).To(Succeed())
}
| 2,610 |
eks-anywhere | aws | Go | package clustermanager
import (
"math"
"testing"
"time"
. "github.com/onsi/gomega"
)
var ClusterctlMoveRetryPolicy = clusterctlMoveRetryPolicy
func TestClusterManager_totalTimeoutForMachinesReadyWait(t *testing.T) {
tests := []struct {
name string
replicas int
opts []ClusterManagerOpt
want time.Duration
}{
{
name: "default timeouts with 1 replica",
replicas: 1,
want: 30 * time.Minute,
},
{
name: "default timeouts with 2 replicas",
replicas: 2,
want: 30 * time.Minute,
},
{
name: "default timeouts with 4 replicas",
replicas: 4,
want: 40 * time.Minute,
},
{
name: "no timeouts with 1 replica",
replicas: 1,
opts: []ClusterManagerOpt{WithNoTimeouts()},
want: math.MaxInt64,
},
{
name: "no timeouts with 2 replicas",
replicas: 2,
opts: []ClusterManagerOpt{WithNoTimeouts()},
want: math.MaxInt64,
},
{
			name:     "no timeouts with 4 replicas",
			replicas: 4,
opts: []ClusterManagerOpt{WithNoTimeouts()},
want: math.MaxInt64,
},
{
name: "no timeouts with 0 replicas",
			replicas: 0,
opts: []ClusterManagerOpt{WithNoTimeouts()},
want: math.MaxInt64,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := New(nil, nil, nil, nil, nil, nil, tt.opts...)
g := NewWithT(t)
g.Expect(c.totalTimeoutForMachinesReadyWait(tt.replicas)).To(Equal(tt.want))
})
}
}
| 68 |
eks-anywhere | aws | Go | package clustermanager
import (
"context"
"fmt"
"math"
"sort"
"strings"
"time"
"github.com/go-logr/logr"
"golang.org/x/exp/maps"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/manifests"
"github.com/aws/eks-anywhere/pkg/manifests/bundles"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/yamlutil"
)
// EKSAInstallerOpt updates an EKSAInstaller.
type EKSAInstallerOpt func(*EKSAInstaller)
// EKSAInstaller installs eks-a components in a cluster.
type EKSAInstaller struct {
client KubernetesClient
reader manifests.FileReader
deploymentWaitTimeout time.Duration
}
// NewEKSAInstaller constructs a new EKSAInstaller.
func NewEKSAInstaller(client KubernetesClient, reader manifests.FileReader, opts ...EKSAInstallerOpt) *EKSAInstaller {
i := &EKSAInstaller{
client: client,
reader: reader,
deploymentWaitTimeout: DefaultDeploymentWait,
}
for _, o := range opts {
o(i)
}
return i
}
// WithEKSAInstallerNoTimeouts disables the timeout when waiting for a deployment to be ready.
func WithEKSAInstallerNoTimeouts() EKSAInstallerOpt {
return func(i *EKSAInstaller) {
i.deploymentWaitTimeout = time.Duration(math.MaxInt64)
}
}
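// A minimal usage sketch for NewEKSAInstaller (the client, reader, ctx, log,
// cluster and spec values are assumed to be provided by the caller; names here
// are illustrative only):
//
//	installer := NewEKSAInstaller(client, reader, WithEKSAInstallerNoTimeouts())
//	if err := installer.Install(ctx, log, cluster, spec); err != nil {
//		// handle error
//	}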
// Install configures and applies eks-a components in a cluster according to a spec.
func (i *EKSAInstaller) Install(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error {
generator := EKSAComponentGenerator{log: log, reader: i.reader}
components, err := generator.buildEKSAComponentsSpec(spec)
if err != nil {
return err
}
objs := make([]runtime.Object, 0, len(components.rest)+1)
objs = append(objs, components.deployment)
for _, o := range components.rest {
objs = append(objs, o)
}
for _, o := range objs {
if err = i.client.Apply(ctx, cluster.KubeconfigFile, o); err != nil {
return fmt.Errorf("applying eksa components: %v", err)
}
}
if err := i.client.WaitForDeployment(ctx, cluster, i.deploymentWaitTimeout.String(), "Available", constants.EksaControllerManagerDeployment, constants.EksaSystemNamespace); err != nil {
return fmt.Errorf("waiting for eksa-controller-manager: %v", err)
}
return nil
}
// Upgrade re-installs the eksa components in a cluster if the VersionsBundle defined in the
// new spec has a different eks-a components version. Workload clusters are ignored.
func (i *EKSAInstaller) Upgrade(ctx context.Context, log logr.Logger, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) (*types.ChangeDiff, error) {
log.V(1).Info("Checking for EKS-A components upgrade")
if !newSpec.Cluster.IsSelfManaged() {
log.V(1).Info("Skipping EKS-A components upgrade, not a self-managed cluster")
return nil, nil
}
changeDiff := EksaChangeDiff(currentSpec, newSpec)
if changeDiff == nil {
log.V(1).Info("Nothing to upgrade for controller and CRDs")
return nil, nil
}
log.V(1).Info("Starting EKS-A components upgrade")
oldVersion := currentSpec.VersionsBundle.Eksa.Version
newVersion := newSpec.VersionsBundle.Eksa.Version
if err := i.Install(ctx, log, cluster, newSpec); err != nil {
return nil, fmt.Errorf("upgrading EKS-A components from version %v to version %v: %v", oldVersion, newVersion, err)
}
return changeDiff, nil
}
// EKSAComponentGenerator generates and configures eks-a components.
type EKSAComponentGenerator struct {
log logr.Logger
reader manifests.FileReader
}
func (g *EKSAComponentGenerator) buildEKSAComponentsSpec(spec *cluster.Spec) (*eksaComponents, error) {
components, err := g.parseEKSAComponentsSpec(spec)
if err != nil {
return nil, err
}
g.configureEKSAComponents(components, spec)
return components, nil
}
func (g *EKSAComponentGenerator) configureEKSAComponents(c *eksaComponents, spec *cluster.Spec) {
// TODO(g-gaston): we should do this with a custom ControllerManagerConfig.
// This requires wider changes in the controller manager setup and config manifest,
// so leaving this for later.
setManagerFlags(c.deployment, spec)
setManagerEnvVars(c.deployment, spec)
}
func setManagerFlags(d *appsv1.Deployment, spec *cluster.Spec) {
gates := []string{}
for _, g := range managerEnabledGates(spec) {
gates = append(gates, fmt.Sprintf("%s=true", g))
}
args := d.Spec.Template.Spec.Containers[0].Args
if len(gates) > 0 {
args = append(args, fmt.Sprintf("--feature-gates=%s", strings.Join(gates, ",")))
}
d.Spec.Template.Spec.Containers[0].Args = args
}
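// For illustration of setManagerFlags, and matching the expectations in this
// package's tests: when the full lifecycle gate is enabled, the manager container
// ends up with an extra argument of the form
//
//	--feature-gates=FullLifecycleAPI=true
//
// appended after whatever arguments the manifest already defines.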
func setManagerEnvVars(d *appsv1.Deployment, spec *cluster.Spec) {
envVars := d.Spec.Template.Spec.Containers[0].Env
proxy := spec.Cluster.ProxyConfiguration()
if proxy != nil {
proxyEnvVarNames := maps.Keys(proxy)
sort.Strings(proxyEnvVarNames)
for _, name := range proxyEnvVarNames {
envVars = append(envVars, v1.EnvVar{Name: name, Value: proxy[name]})
}
}
d.Spec.Template.Spec.Containers[0].Env = envVars
}
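// For illustration of setManagerEnvVars: with a proxy configuration set on the
// cluster, the proxy env vars are appended in alphabetical order of their names,
// e.g. (values from this package's tests):
//
//	HTTPS_PROXY=proxy
//	HTTP_PROXY=proxy
//	NO_PROXY=no-proxy,no-proxy-2,1.2.3.4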
func managerEnabledGates(spec *cluster.Spec) []string {
g := []string{}
// TODO(g-gaston): inject a "features" checker instead of using global one
if features.IsActive(features.FullLifecycleAPI()) || fullLifeCycleControllerForProvider(spec.Cluster) {
g = append(g, features.FullLifecycleGate)
}
return g
}
func fullLifeCycleControllerForProvider(cluster *anywherev1.Cluster) bool {
// TODO(g-gaston): inject a dependency where this check can be delegated
// We can use some kind of configurator registering that allow to decouple this generator
// from the logic that drives the gates.
return cluster.Spec.DatacenterRef.Kind == anywherev1.VSphereDatacenterKind ||
cluster.Spec.DatacenterRef.Kind == anywherev1.DockerDatacenterKind ||
cluster.Spec.DatacenterRef.Kind == anywherev1.SnowDatacenterKind ||
cluster.Spec.DatacenterRef.Kind == anywherev1.NutanixDatacenterKind ||
cluster.Spec.DatacenterRef.Kind == anywherev1.TinkerbellDatacenterKind ||
cluster.Spec.DatacenterRef.Kind == anywherev1.CloudStackDatacenterKind
}
func (g *EKSAComponentGenerator) parseEKSAComponentsSpec(spec *cluster.Spec) (*eksaComponents, error) {
componentsManifest, err := bundles.ReadManifest(g.reader, spec.VersionsBundle.Eksa.Components)
if err != nil {
return nil, fmt.Errorf("loading manifest for eksa components: %v", err)
}
p := yamlutil.NewParser(g.log)
err = p.RegisterMappings(
yamlutil.NewMapping(
"Deployment", func() yamlutil.APIObject {
return &appsv1.Deployment{}
},
),
)
if err != nil {
return nil, fmt.Errorf("registering yaml mappings for eksa components: %v", err)
}
p.RegisterMappingForAnyKind(func() yamlutil.APIObject {
return &unstructured.Unstructured{}
})
components := &eksaComponents{}
if err = p.Parse(componentsManifest.Content, components); err != nil {
return nil, fmt.Errorf("parsing eksa components: %v", err)
}
return components, nil
}
type eksaComponents struct {
deployment *appsv1.Deployment
rest []*unstructured.Unstructured
}
func (c *eksaComponents) BuildFromParsed(lookup yamlutil.ObjectLookup) error {
for _, obj := range lookup {
if obj.GetObjectKind().GroupVersionKind().Kind == "Deployment" {
c.deployment = obj.(*appsv1.Deployment)
} else {
c.rest = append(c.rest, obj.(*unstructured.Unstructured))
}
}
return nil
}
// EksaChangeDiff computes the version diff in eksa components between two specs.
func EksaChangeDiff(currentSpec, newSpec *cluster.Spec) *types.ChangeDiff {
if currentSpec.VersionsBundle.Eksa.Version != newSpec.VersionsBundle.Eksa.Version {
return &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "EKS-A",
NewVersion: newSpec.VersionsBundle.Eksa.Version,
OldVersion: currentSpec.VersionsBundle.Eksa.Version,
},
},
}
}
return nil
}
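// For example, moving Eksa.Version from "v0.1.0" to "v0.2.0" yields a diff with a
// single component report (identical versions yield nil):
//
//	&types.ChangeDiff{ComponentReports: []types.ComponentChangeDiff{
//		{ComponentName: "EKS-A", OldVersion: "v0.1.0", NewVersion: "v0.2.0"},
//	}}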
| 246 |
eks-anywhere | aws | Go | package clustermanager_test
import (
"context"
"os"
"testing"
"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermanager"
"github.com/aws/eks-anywhere/pkg/clustermanager/mocks"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/files"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/release/api/v1alpha1"
)
type installerTest struct {
*WithT
ctx context.Context
log logr.Logger
client *mocks.MockKubernetesClient
currentSpec *cluster.Spec
newSpec *cluster.Spec
installer *clustermanager.EKSAInstaller
cluster *types.Cluster
}
func newInstallerTest(t *testing.T, opts ...clustermanager.EKSAInstallerOpt) *installerTest {
ctrl := gomock.NewController(t)
client := mocks.NewMockKubernetesClient(ctrl)
currentSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.VersionsBundle.Eksa.Version = "v0.1.0"
s.Cluster = &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Endpoint: &anywherev1.Endpoint{
Host: "1.2.3.4",
},
},
},
}
})
return &installerTest{
WithT: NewWithT(t),
ctx: context.Background(),
log: test.NewNullLogger(),
client: client,
installer: clustermanager.NewEKSAInstaller(client, files.NewReader(), opts...),
currentSpec: currentSpec,
newSpec: currentSpec.DeepCopy(),
cluster: &types.Cluster{
Name: "cluster-name",
KubeconfigFile: "k.kubeconfig",
},
}
}
func TestEKSAInstallerInstallSuccessWithRealManifest(t *testing.T) {
tt := newInstallerTest(t)
tt.newSpec.VersionsBundle.Eksa.Components.URI = "../../config/manifest/eksa-components.yaml"
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{}))
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.Any()).Times(34) // there are 34 objects in the manifest
tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system")
tt.Expect(tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec)).To(Succeed())
}
func TestEKSAInstallerInstallSuccessWithTestManifest(t *testing.T) {
tt := newInstallerTest(t)
tt.newSpec.VersionsBundle.Eksa.Components.URI = "testdata/eksa_components.yaml"
tt.newSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "1.2.3.4"
tt.newSpec.Cluster.Spec.DatacenterRef.Kind = anywherev1.VSphereDatacenterKind
tt.newSpec.Cluster.Spec.ProxyConfiguration = &anywherev1.ProxyConfiguration{
HttpProxy: "proxy",
HttpsProxy: "proxy",
NoProxy: []string{"no-proxy", "no-proxy-2"},
}
wantDeployment := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "eksa-controller-manager",
Namespace: "eksa-system",
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Args: []string{
"--leader-elect",
"--feature-gates=FullLifecycleAPI=true",
},
Env: []corev1.EnvVar{
{
Name: "HTTPS_PROXY",
Value: "proxy",
},
{
Name: "HTTP_PROXY",
Value: "proxy",
},
{
Name: "NO_PROXY",
Value: "no-proxy,no-proxy-2,1.2.3.4",
},
},
},
},
},
},
},
}
wantNamespace := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": map[string]interface{}{
"name": "eksa-system",
},
},
}
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, wantDeployment)
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, wantNamespace)
tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system")
tt.Expect(tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec)).To(Succeed())
}
func TestEKSAInstallerInstallSuccessWithNoTimeout(t *testing.T) {
tt := newInstallerTest(t, clustermanager.WithEKSAInstallerNoTimeouts())
tt.newSpec.VersionsBundle.Eksa.Components.URI = "../../config/manifest/eksa-components.yaml"
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{}))
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.Any()).Times(34) // there are 34 objects in the manifest
tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, maxTime.String(), "Available", "eksa-controller-manager", "eksa-system")
tt.Expect(tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec)).To(Succeed())
}
func TestInstallerUpgradeNoSelfManaged(t *testing.T) {
tt := newInstallerTest(t)
tt.newSpec.Cluster.SetManagedBy("management-cluster")
tt.Expect(tt.installer.Upgrade(tt.ctx, tt.log, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestInstallerUpgradeNoChanges(t *testing.T) {
tt := newInstallerTest(t)
tt.Expect(tt.installer.Upgrade(tt.ctx, tt.log, tt.cluster, tt.currentSpec, tt.newSpec)).To(BeNil())
}
func TestInstallerUpgradeSuccess(t *testing.T) {
tt := newInstallerTest(t)
tt.newSpec.VersionsBundle.Eksa.Version = "v0.2.0"
tt.newSpec.VersionsBundle.Eksa.Components = v1alpha1.Manifest{
URI: "testdata/eksa_components.yaml",
}
wantDiff := &types.ChangeDiff{
ComponentReports: []types.ComponentChangeDiff{
{
ComponentName: "EKS-A",
NewVersion: "v0.2.0",
OldVersion: "v0.1.0",
},
},
}
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{}))
tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&unstructured.Unstructured{}))
tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system")
tt.Expect(tt.installer.Upgrade(tt.ctx, tt.log, tt.cluster, tt.currentSpec, tt.newSpec)).To(Equal(wantDiff))
}
func TestInstallerUpgradeInstallError(t *testing.T) {
tt := newInstallerTest(t)
tt.newSpec.VersionsBundle.Eksa.Version = "v0.2.0"
	// Components manifest URI is not set, so loading the manifest should fail and return an error.
_, err := tt.installer.Upgrade(tt.ctx, tt.log, tt.cluster, tt.currentSpec, tt.newSpec)
tt.Expect(err).NotTo(BeNil())
}
func TestSetManagerFlags(t *testing.T) {
tests := []struct {
name string
deployment *appsv1.Deployment
spec *cluster.Spec
featureEnvVars []string
want *appsv1.Deployment
}{
{
name: "no flags",
deployment: deployment(),
spec: test.NewClusterSpec(),
want: deployment(),
},
{
name: "full lifecycle, vsphere",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.DatacenterRef.Kind = anywherev1.VSphereDatacenterKind
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
{
name: "full lifecycle, docker",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.DatacenterRef.Kind = anywherev1.DockerDatacenterKind
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
{
name: "full lifecycle, snow",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.DatacenterRef.Kind = anywherev1.SnowDatacenterKind
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
{
name: "full lifecycle, nutanix",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.DatacenterRef.Kind = anywherev1.NutanixDatacenterKind
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
{
name: "full lifecycle, tinkerbell",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.DatacenterRef.Kind = anywherev1.TinkerbellDatacenterKind
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
{
name: "full lifecycle, cloudstack",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Spec.DatacenterRef.Kind = anywherev1.CloudStackDatacenterKind
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
{
name: "full lifecycle, feature flag enabled",
deployment: deployment(),
spec: test.NewClusterSpec(),
featureEnvVars: []string{features.FullLifecycleAPIEnvVar},
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Args = []string{
"--feature-gates=FullLifecycleAPI=true",
}
}),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
os.Unsetenv(features.FullLifecycleAPIEnvVar)
features.ClearCache()
for _, e := range tt.featureEnvVars {
t.Setenv(e, "true")
}
g := NewWithT(t)
clustermanager.SetManagerFlags(tt.deployment, tt.spec)
g.Expect(tt.deployment).To(Equal(tt.want))
})
}
}
func TestSetManagerEnvVars(t *testing.T) {
tests := []struct {
name string
deployment *appsv1.Deployment
spec *cluster.Spec
want *appsv1.Deployment
}{
{
name: "no env vars",
deployment: deployment(),
spec: test.NewClusterSpec(),
want: deployment(),
},
{
name: "proxy env vars",
deployment: deployment(),
spec: test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &anywherev1.Cluster{
Spec: anywherev1.ClusterSpec{
ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{
Endpoint: &anywherev1.Endpoint{
Host: "1.2.3.4",
},
},
ProxyConfiguration: &anywherev1.ProxyConfiguration{
HttpProxy: "proxy",
HttpsProxy: "proxy",
NoProxy: []string{"no-proxy", "no-proxy-2"},
},
},
}
}),
want: deployment(func(d *appsv1.Deployment) {
d.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{
{
Name: "HTTPS_PROXY",
Value: "proxy",
},
{
Name: "HTTP_PROXY",
Value: "proxy",
},
{
Name: "NO_PROXY",
Value: "no-proxy,no-proxy-2,1.2.3.4",
},
}
}),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
clustermanager.SetManagerEnvVars(tt.deployment, tt.spec)
g.Expect(tt.deployment).To(Equal(tt.want))
})
}
}
type deploymentOpt func(*appsv1.Deployment)
func deployment(opts ...deploymentOpt) *appsv1.Deployment {
d := &appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{},
},
},
},
},
}
for _, opt := range opts {
opt(d)
}
return d
}
| 395 |
eks-anywhere | aws | Go | package clustermanager
var (
SetManagerFlags = setManagerFlags
SetManagerEnvVars = setManagerEnvVars
)
| 7 |
eks-anywhere | aws | Go | package clustermanager
import (
"context"
"regexp"
"strconv"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/utils/oci"
)
const (
iptablesLegacyLabel = "anywhere.eks.amazonaws.com/iptableslegacy"
iptablesLegacyKubeProxyDSName = "kube-proxy-iptables-legacy"
k8sAppLabel = "k8s-app"
kubeProxyLabel = "kube-proxy"
kubeProxyDSName = "kube-proxy"
kubeProxyDSNamespace = "kube-system"
)
var firstEKSDWithNewKubeProxy = map[anywherev1.KubernetesVersion]int{
anywherev1.Kube122: 22,
anywherev1.Kube123: 17,
anywherev1.Kube124: 12,
anywherev1.Kube125: 8,
}
// ClientFactory builds Kubernetes clients.
type ClientFactory interface {
// BuildClientFromKubeconfig builds a Kubernetes client from a kubeconfig file.
BuildClientFromKubeconfig(kubeconfigPath string) (kubernetes.Client, error)
}
// NewKubeProxyCLIUpgrader builds a new KubeProxyCLIUpgrader.
func NewKubeProxyCLIUpgrader(log logr.Logger, factory ClientFactory, opts ...KubeProxyCLIUpgraderOpt) KubeProxyCLIUpgrader {
u := &KubeProxyCLIUpgrader{
log: log,
clientFactory: factory,
retrier: *retrier.NewWithMaxRetries(12, time.Second),
}
for _, opt := range opts {
opt(u)
}
return *u
}
// KubeProxyCLIUpgrader prepares a cluster for a kube-proxy upgrade.
// It's mostly a wrapper around [KubeProxyUpgrader] to be used from the CLI.
// It builds clients from kubeconfig files and facilitates mocking. It also wraps
// its calls to [KubeProxyUpgrader] in a retrier to deal with transient errors.
type KubeProxyCLIUpgrader struct {
clientFactory ClientFactory
log logr.Logger
retrier retrier.Retrier
}
// KubeProxyCLIUpgraderOpt allows customizing a KubeProxyCLIUpgrader
// on construction.
type KubeProxyCLIUpgraderOpt func(*KubeProxyCLIUpgrader)
// KubeProxyCLIUpgraderRetrier allows using a custom retrier.
func KubeProxyCLIUpgraderRetrier(retrier retrier.Retrier) KubeProxyCLIUpgraderOpt {
return func(u *KubeProxyCLIUpgrader) {
u.retrier = retrier
}
}
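// A minimal usage sketch for NewKubeProxyCLIUpgrader (the logger, client factory,
// spec and kubeconfig paths are assumed to come from the caller; the retry values
// are illustrative):
//
//	u := NewKubeProxyCLIUpgrader(log, factory,
//		KubeProxyCLIUpgraderRetrier(*retrier.NewWithMaxRetries(5, time.Second)),
//	)
//	err := u.PrepareUpgrade(ctx, spec, managementKubeconfig, workloadKubeconfig)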
// PrepareUpgrade performs the necessary steps prior to a kube-proxy upgrade.
func (u KubeProxyCLIUpgrader) PrepareUpgrade(ctx context.Context,
spec *cluster.Spec,
managementClusterKubeconfigPath, workloadClusterKubeconfigPath string,
) error {
managementClusterClient, workloadClusterClient, err := u.buildClients(
managementClusterKubeconfigPath, workloadClusterKubeconfigPath,
)
if err != nil {
return err
}
up := NewKubeProxyUpgrader()
return u.retrier.Retry(func() error {
return up.PrepareForUpgrade(ctx, u.log, managementClusterClient, workloadClusterClient, spec)
})
}
// CleanupAfterUpgrade performs the necessary steps after an upgrade.
func (u KubeProxyCLIUpgrader) CleanupAfterUpgrade(ctx context.Context,
spec *cluster.Spec,
managementClusterKubeconfigPath, workloadClusterKubeconfigPath string,
) error {
managementClusterClient, workloadClusterClient, err := u.buildClients(
managementClusterKubeconfigPath, workloadClusterKubeconfigPath,
)
if err != nil {
return err
}
up := NewKubeProxyUpgrader()
return u.retrier.Retry(func() error {
return up.CleanupAfterUpgrade(ctx, u.log, managementClusterClient, workloadClusterClient, spec)
})
}
func (u KubeProxyCLIUpgrader) buildClients(
managementClusterKubeconfigPath, workloadClusterKubeconfigPath string,
) (managementClusterClient, workloadClusterClient kubernetes.Client, err error) {
u.log.V(4).Info("Building client for management cluster", "kubeconfig", managementClusterKubeconfigPath)
if err = u.retrier.Retry(func() error {
managementClusterClient, err = u.clientFactory.BuildClientFromKubeconfig(managementClusterKubeconfigPath)
return err
}); err != nil {
return nil, nil, err
}
u.log.V(4).Info("Building client for workload cluster", "kubeconfig", workloadClusterKubeconfigPath)
if err = u.retrier.Retry(func() error {
workloadClusterClient, err = u.clientFactory.BuildClientFromKubeconfig(workloadClusterKubeconfigPath)
return err
}); err != nil {
return nil, nil, err
}
return managementClusterClient, workloadClusterClient, nil
}
// NewKubeProxyUpgrader builds a new KubeProxyUpgrader.
func NewKubeProxyUpgrader(opts ...KubeProxyUpgraderOpt) KubeProxyUpgrader {
u := &KubeProxyUpgrader{
updateKubeProxyRetries: 30,
updateKubeProxyBackoff: 2 * time.Second,
}
for _, opt := range opts {
opt(u)
}
return *u
}
// KubeProxyUpgrader prepares a cluster for a kube-proxy upgrade.
type KubeProxyUpgrader struct {
updateKubeProxyRetries int
updateKubeProxyBackoff time.Duration
}
// KubeProxyUpgraderOpt allows customizing a KubeProxyUpgrader
// on construction.
type KubeProxyUpgraderOpt func(*KubeProxyUpgrader)
// WithUpdateKubeProxyTiming allows customizing the retry parameters for the
// kube-proxy version update. This is for unit tests.
func WithUpdateKubeProxyTiming(retries int, backoff time.Duration) KubeProxyUpgraderOpt {
return func(u *KubeProxyUpgrader) {
u.updateKubeProxyRetries = retries
u.updateKubeProxyBackoff = backoff
}
}
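// For example, this package's tests build an upgrader with tighter retry timing:
//
//	u := NewKubeProxyUpgrader(WithUpdateKubeProxyTiming(4, 100*time.Millisecond))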
// PrepareForUpgrade gets the workload cluster ready for a smooth transition between the
// old kube-proxy that always uses iptables legacy and the new one that detects the host preference
// and is able to work with nft as well. This is idempotent, so it can be called in a loop if transient
// errors are a risk.
func (u KubeProxyUpgrader) PrepareForUpgrade(ctx context.Context, log logr.Logger, managementClusterClient, workloadClusterClient kubernetes.Client, spec *cluster.Spec) error {
kcp, err := getKubeadmControlPlane(ctx, managementClusterClient, spec.Cluster)
if err != nil {
return errors.Wrap(err, "reading the kubeadm control plane for an upgrade")
}
_, newVersion := oci.Split(spec.VersionsBundle.KubeDistro.KubeProxy.URI)
// If the new spec doesn't include the new kube-proxy or if the current cluster already has it, skip this
if needsPrepare, err := needsKubeProxyPreUpgrade(spec, kcp); err != nil {
return err
} else if !needsPrepare {
log.V(4).Info("Kube-proxy upgrade doesn't need special handling", "currentVersion", kcp.Spec.Version, "newVersion", newVersion)
return nil
}
log.V(4).Info("Detected upgrade from kube-proxy with iptables legacy upgrade to new version", "currentVersion", kcp.Spec.Version, "newVersion", newVersion)
// Add the annotation to the kcp so it doesn't undo our changes to the kube-proxy DS
if err := annotateKCPWithSKipKubeProxy(ctx, log, managementClusterClient, kcp); err != nil {
return err
}
// Add label to nodes so we can use nodeAffinity to control the kube-proxy scheduling
if err := addIPTablesLegacyLabelToAllNodes(ctx, log, workloadClusterClient); err != nil {
return err
}
originalKubeProxy, err := getKubeProxy(ctx, workloadClusterClient)
if err != nil {
return err
}
// Make sure original kube-proxy DS is only scheduled in new nodes and it stops running in current nodes.
if err := restrictKubeProxyToNewNodes(ctx, workloadClusterClient, originalKubeProxy); err != nil {
return err
}
// Once old kube-proxy pods are deleted, create the new DS that will only be scheduled in the old nodes.
if err := createIPTablesLegacyKubeProxy(ctx, workloadClusterClient, kcp, originalKubeProxy); err != nil {
return err
}
// Finally update the main kube-proxy DS to reflect the new version so all the new nodes
// get that one scheduled from the beginning.
log.V(4).Info("Updating kube-proxy DS version", "oldVersion", kcp.Spec.Version, "newVersion", newVersion)
if err := u.ensureUpdateKubeProxyVersion(ctx, log, workloadClusterClient, spec); err != nil {
return err
}
return nil
}
// CleanupAfterUpgrade cleans up all the leftover changes made by PrepareForUpgrade.
// It's idempotent, so it can be called multiple times, even if PrepareForUpgrade wasn't
// called before.
func (u KubeProxyUpgrader) CleanupAfterUpgrade(ctx context.Context, log logr.Logger, managementClusterClient, workloadClusterClient kubernetes.Client, spec *cluster.Spec) error {
log.V(4).Info("Deleting iptables legacy kube-proxy", "name", iptablesLegacyKubeProxyDSName)
if err := deleteIPTablesLegacyKubeProxy(ctx, workloadClusterClient); err != nil {
return err
}
	// Remove nodeAffinity from the original kube-proxy. It's not strictly necessary since there
// won't be more nodes with that label, but it prevents future errors.
kubeProxy, err := getKubeProxy(ctx, workloadClusterClient)
if err != nil {
return err
}
if kubeProxy.Spec.Template.Spec.Affinity != nil {
kubeProxy.Spec.Template.Spec.Affinity = nil
log.V(4).Info("Removing node-affinity from kube-proxy")
if err := workloadClusterClient.Update(ctx, kubeProxy); err != nil {
return errors.Wrap(err, "updating main kube-proxy version to remove nodeAffinity")
}
}
// Remove the skip annotation from the kubeadm control plane so it starts reconciling the kube-proxy again
kcp, err := getKubeadmControlPlane(ctx, managementClusterClient, spec.Cluster)
if err != nil {
return errors.Wrap(err, "reading the kubeadm control plane to cleanup the skip annotations")
}
if _, ok := kcp.Annotations[controlplanev1.SkipKubeProxyAnnotation]; !ok {
return nil
}
delete(kcp.Annotations, controlplanev1.SkipKubeProxyAnnotation)
log.V(4).Info("Removing skip kube-proxy annotation from KubeadmControlPlane")
if err := managementClusterClient.Update(ctx, kcp); err != nil {
return errors.Wrap(err, "preparing kcp for kube-proxy upgrade")
}
return nil
}
func specIncludesNewKubeProxy(spec *cluster.Spec) bool {
return eksdIncludesNewKubeProxy(spec.Cluster.Spec.KubernetesVersion, spec.VersionsBundle.KubeDistro.EKSD.Number)
}
func eksdIncludesNewKubeProxy(version anywherev1.KubernetesVersion, number int) bool {
return number >= firstEKSDWithNewKubeProxy[version]
}
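// For example, using the firstEKSDWithNewKubeProxy table above:
// eksdIncludesNewKubeProxy(anywherev1.Kube123, 16) returns false, while
// eksdIncludesNewKubeProxy(anywherev1.Kube123, 17) returns true.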
var eksDNumberRegex = regexp.MustCompile(`(?m)^.*-eks-(\d)-(\d+)-(\d+)$`)
func eksdVersionAndNumberFromTag(tag string) (anywherev1.KubernetesVersion, int, error) {
matches := eksDNumberRegex.FindStringSubmatch(tag)
if len(matches) != 4 {
return "", 0, errors.Errorf("invalid eksd tag format %s", tag)
}
kubeMajor := matches[1]
kubeMinor := matches[2]
kubeVersion := anywherev1.KubernetesVersion(kubeMajor + "." + kubeMinor)
numberStr := matches[3]
number, err := strconv.Atoi(numberStr)
if err != nil {
return "", 0, errors.Wrapf(err, "invalid number in eksd tag %s", tag)
}
return kubeVersion, number, nil
}
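// For example, eksdVersionAndNumberFromTag parses the tag "v1.23.16-eks-1-23-16"
// into kube version anywherev1.Kube123 and EKS-D release number 16:
//
//	version, number, err := eksdVersionAndNumberFromTag("v1.23.16-eks-1-23-16")
//	// version == anywherev1.Kube123, number == 16, err == nil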
func needsKubeProxyPreUpgrade(spec *cluster.Spec, currentKCP *controlplanev1.KubeadmControlPlane) (bool, error) {
currentKubeVersion, currentEKSDNumber, err := eksdVersionAndNumberFromTag(currentKCP.Spec.Version)
if err != nil {
return false, err
}
return specIncludesNewKubeProxy(spec) && !eksdIncludesNewKubeProxy(currentKubeVersion, currentEKSDNumber), nil
}
func annotateKCPWithSKipKubeProxy(ctx context.Context, log logr.Logger, c kubernetes.Client, kcp *controlplanev1.KubeadmControlPlane) error {
log.V(4).Info("Adding skip annotation to kcp", "kcp", klog.KObj(kcp), "annotation", controlplanev1.SkipKubeProxyAnnotation)
clientutil.AddAnnotation(kcp, controlplanev1.SkipKubeProxyAnnotation, "true")
if err := c.Update(ctx, kcp); err != nil {
return errors.Wrap(err, "preparing kcp for kube-proxy upgrade")
}
return nil
}
func addIPTablesLegacyLabelToAllNodes(ctx context.Context, log logr.Logger, client kubernetes.Client) error {
nodeList := &corev1.NodeList{}
if err := client.List(ctx, nodeList); err != nil {
return errors.Wrap(err, "listing workload cluster nodes for kube-proxy upgrade")
}
nodes := make([]*corev1.Node, 0, len(nodeList.Items))
for i := range nodeList.Items {
nodes = append(nodes, &nodeList.Items[i])
}
log.V(4).Info("Adding iptables-legacy label to nodes", "nodes", klog.KObjSlice(nodes), "label", iptablesLegacyLabel)
for i := range nodeList.Items {
n := &nodeList.Items[i]
clientutil.AddLabel(n, iptablesLegacyLabel, "true")
if err := client.Update(ctx, n); err != nil {
return errors.Wrap(err, "preparing workload cluster nodes for kube-proxy upgrade")
}
}
return nil
}
func getKubeProxy(ctx context.Context, c kubernetes.Client) (*appsv1.DaemonSet, error) {
kubeProxy := &appsv1.DaemonSet{}
if err := c.Get(ctx, kubeProxyDSName, kubeProxyDSNamespace, kubeProxy); err != nil {
return nil, errors.Wrap(err, "reading kube-proxy for upgrade")
}
return kubeProxy, nil
}
func getKubeadmControlPlane(ctx context.Context, c kubernetes.Client, cluster *anywherev1.Cluster) (*controlplanev1.KubeadmControlPlane, error) {
key := controller.CAPIKubeadmControlPlaneKey(cluster)
kubeadmControlPlane := &controlplanev1.KubeadmControlPlane{}
if err := c.Get(ctx, key.Name, key.Namespace, kubeadmControlPlane); err != nil {
return nil, err
}
return kubeadmControlPlane, nil
}
func addAntiNodeAffinityToKubeProxy(ctx context.Context, client kubernetes.Client, kubeProxy *appsv1.DaemonSet) error {
kubeProxy.Spec.Template.Spec.Affinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: iptablesLegacyLabel,
Operator: corev1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
}
if err := client.Update(ctx, kubeProxy); err != nil {
return errors.Wrap(err, "preparing main kube-proxty for upgrade")
}
return nil
}
func deleteAllOriginalKubeProxyPods(ctx context.Context, c kubernetes.Client) error {
if err := c.DeleteAllOf(ctx, &corev1.Pod{},
&kubernetes.DeleteAllOfOptions{
Namespace: kubeProxyDSNamespace,
HasLabels: map[string]string{
k8sAppLabel: kubeProxyLabel,
},
},
); err != nil && !apierrors.IsNotFound(err) {
return errors.Wrap(err, "deleting kube-proxy pods before upgrade")
}
return nil
}
func restrictKubeProxyToNewNodes(ctx context.Context, client kubernetes.Client, kubeProxy *appsv1.DaemonSet) error {
kubeProxy = kubeProxy.DeepCopy()
// Add nodeAffinity to kube-proxy so it's not scheduled in new nodes without our label
if err := addAntiNodeAffinityToKubeProxy(ctx, client, kubeProxy); err != nil {
return err
}
// Delete original kube-proxy pods to ensure there is only one copy of kube-proxy running
// on each node.
if err := deleteAllOriginalKubeProxyPods(ctx, client); err != nil {
return err
}
return nil
}
func iptablesLegacyKubeProxyFromCurrentDaemonSet(kcp *controlplanev1.KubeadmControlPlane, kubeProxy *appsv1.DaemonSet) *appsv1.DaemonSet {
iptablesLegacyKubeProxy := kubeProxy.DeepCopy()
// Generate a new DS with the old kube-proxy version with nodeAffinity so it only
// gets scheduled in the old (current) nodes.
iptablesLegacyKubeProxy.Name = iptablesLegacyKubeProxyDSName
iptablesLegacyKubeProxy.ObjectMeta.ResourceVersion = ""
iptablesLegacyKubeProxy.ObjectMeta.UID = ""
image := kcp.Spec.KubeadmConfigSpec.ClusterConfiguration.ImageRepository + "/kube-proxy" +
":" + kcp.Spec.Version
iptablesLegacyKubeProxy.Spec.Template.Spec.Containers[0].Image = image
iptablesLegacyKubeProxy.Spec.Template.Spec.Affinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: iptablesLegacyLabel,
Operator: corev1.NodeSelectorOpExists,
},
},
},
},
},
},
}
return iptablesLegacyKubeProxy
}
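// For illustration of iptablesLegacyKubeProxyFromCurrentDaemonSet: with an image
// repository of "public.ecr.aws/eks-distro/kubernetes" and a KCP version of
// "v1.23.16-eks-1-23-16" (values from this package's tests), the secondary
// DaemonSet pins its container image to
//
//	public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-16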
func createIPTablesLegacyKubeProxy(ctx context.Context, client kubernetes.Client, kcp *controlplanev1.KubeadmControlPlane, originalKubeProxy *appsv1.DaemonSet) error {
iptablesLegacyKubeProxy := iptablesLegacyKubeProxyFromCurrentDaemonSet(kcp, originalKubeProxy)
if err := client.Create(ctx, iptablesLegacyKubeProxy); err != nil && !apierrors.IsAlreadyExists(err) {
return errors.Wrap(err, "creating secondary kube-proxy DS with iptables-legacy for old nodes")
}
return nil
}
func deleteIPTablesLegacyKubeProxy(ctx context.Context, client kubernetes.Client) error {
iptablesLegacyKubeProxy := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: iptablesLegacyKubeProxyDSName,
Namespace: kubeProxyDSNamespace,
},
}
if err := client.Delete(ctx, iptablesLegacyKubeProxy); err != nil && !apierrors.IsNotFound(err) {
return errors.Wrap(err, "deleting secondary kube-proxy DS with iptables-legacy")
}
return nil
}
func updateKubeProxyVersion(ctx context.Context, client kubernetes.Client, kubeProxy *appsv1.DaemonSet, image string) error {
kubeProxy.Spec.Template.Spec.Containers[0].Image = image
if err := client.Update(ctx, kubeProxy); err != nil {
return errors.Wrap(err, "updating main kube-proxy version before upgrade")
}
return nil
}
func (u KubeProxyUpgrader) ensureUpdateKubeProxyVersion(ctx context.Context, log logr.Logger, client kubernetes.Client, spec *cluster.Spec) error {
newKubeProxyImage := spec.VersionsBundle.KubeDistro.KubeProxy.URI
return retrier.Retry(u.updateKubeProxyRetries, u.updateKubeProxyBackoff, func() error {
kubeProxy, err := getKubeProxy(ctx, client)
if err != nil {
return err
}
currentImage := kubeProxy.Spec.Template.Spec.Containers[0].Image
if currentImage == newKubeProxyImage {
log.V(4).Info("Kube-proxy image update seems stable", "wantImage", newKubeProxyImage, "currentImage", currentImage)
return nil
}
log.V(4).Info("Kube-proxy image update has been reverted or was never updated", "wantImage", newKubeProxyImage, "currentImage", currentImage)
log.V(4).Info("Updating Kube-proxy image", "newImage", newKubeProxyImage)
if err := updateKubeProxyVersion(ctx, client, kubeProxy, newKubeProxyImage); err != nil {
return err
}
return errors.Errorf("kube-proxy image update has been reverted from %s to %s", newKubeProxyImage, currentImage)
})
}
| 510 |
eks-anywhere | aws | Go | package clustermanager_test
import (
"context"
"errors"
"testing"
"time"
"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/clustermanager"
"github.com/aws/eks-anywhere/pkg/clustermanager/mocks"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller/clientutil"
"github.com/aws/eks-anywhere/pkg/retrier"
)
type prepareKubeProxyTest struct {
ctx context.Context
log logr.Logger
spec *cluster.Spec
kcp *controlplanev1.KubeadmControlPlane
kubeProxy *appsv1.DaemonSet
nodeCP *corev1.Node
nodeWorker *corev1.Node
kubeProxyCP *corev1.Pod
kubeProxyWorker *corev1.Pod
managementClient kubernetes.Client
// managementImplClient is a controller-runtime client that serves as the
// underlying implementation for managementClient.
managementImplClient client.Client
workloadClient kubernetes.Client
// workloadImplClient is a controller-runtime client that serves as the
// underlying implementation for workloadClient.
workloadImplClient client.Client
workloadClusterExtraObjects []client.Object
}
func newPrepareKubeProxyTest() *prepareKubeProxyTest {
tt := &prepareKubeProxyTest{}
tt.ctx = context.Background()
tt.log = test.NewNullLogger()
tt.spec = test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = "my-cluster-test"
s.Cluster.Spec.KubernetesVersion = anywherev1.Kube123
s.VersionsBundle.KubeDistro.EKSD.Channel = "1.23"
s.VersionsBundle.KubeDistro.EKSD.Number = 18
s.VersionsBundle.KubeDistro.KubeProxy.URI = "public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-18"
})
tt.kcp = &controlplanev1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
Kind: "KubeadmControlPlane",
APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
},
ObjectMeta: metav1.ObjectMeta{
Name: clusterapi.KubeadmControlPlaneName(tt.spec.Cluster),
Namespace: constants.EksaSystemNamespace,
},
Spec: controlplanev1.KubeadmControlPlaneSpec{
Version: "v1.23.16-eks-1-23-16",
KubeadmConfigSpec: v1beta1.KubeadmConfigSpec{
ClusterConfiguration: &v1beta1.ClusterConfiguration{
ImageRepository: "public.ecr.aws/eks-distro/kubernetes",
},
},
},
}
tt.kubeProxy = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy",
Namespace: "kube-system",
},
Spec: appsv1.DaemonSetSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: "public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-16",
},
},
},
},
},
}
tt.nodeCP = &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "cp",
},
}
tt.nodeWorker = &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "worker",
},
}
tt.kubeProxyCP = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-cp",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "kube-proxy",
},
},
}
tt.kubeProxyWorker = tt.kubeProxyCP.DeepCopy()
tt.kubeProxyWorker.Name = "kube-proxy-worker"
return tt
}
func (tt *prepareKubeProxyTest) initClients(tb testing.TB) {
tt.managementImplClient = fake.NewClientBuilder().WithObjects(tt.kcp).Build()
tt.managementClient = clientutil.NewKubeClient(tt.managementImplClient)
objs := []client.Object{
tt.kubeProxy,
tt.kubeProxyCP,
tt.kubeProxyWorker,
tt.nodeCP,
tt.nodeWorker,
}
objs = append(objs, tt.workloadClusterExtraObjects...)
tt.workloadImplClient = fake.NewClientBuilder().WithObjects(objs...).Build()
tt.workloadClient = clientutil.NewKubeClient(tt.workloadImplClient)
}
// startKCPControllerEmulator starts a goroutine that reverts the kube-proxy
// version update n times and then stops. This is useful to simulate the real
// KCP controller behavior when it hasn't yet seen the skip annotation and it
// keeps reverting the kube-proxy image tag.
func (tt *prepareKubeProxyTest) startKCPControllerEmulator(tb testing.TB, times int) {
go func() {
api := envtest.NewAPIExpecter(tb, tt.workloadImplClient)
kubeProxy := tt.kubeProxy.DeepCopy()
originalImage := kubeProxy.Spec.Template.Spec.Containers[0].Image
for i := 0; i < times; i++ {
api.ShouldEventuallyMatch(tt.ctx, kubeProxy, func(g Gomega) {
// Wait until the image has been updated by KubeProxyUpgrader
currentImage := kubeProxy.Spec.Template.Spec.Containers[0].Image
g.Expect(currentImage).NotTo(Equal(originalImage))
// Then revert the change
kubeProxy.Spec.Template.Spec.Containers[0].Image = originalImage
g.Expect(tt.workloadClient.Update(tt.ctx, kubeProxy))
})
}
}()
}
func TestKubeProxyUpgraderPrepareForUpgradeSuccess(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.initClients(t)
// Revert the kube-proxy image update twice
tt.startKCPControllerEmulator(t, 2)
u := clustermanager.NewKubeProxyUpgrader(
clustermanager.WithUpdateKubeProxyTiming(4, 100*time.Millisecond),
)
g.Expect(
u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(Succeed())
managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient)
managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) {
g.Expect(tt.kcp.Annotations).To(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true"))
})
workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) {
image := tt.kubeProxy.Spec.Template.Spec.Containers[0].Image
g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-18"))
firstMatchExpression := tt.kubeProxy.Spec.Template.Spec.Affinity.
NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0]
g.Expect(firstMatchExpression.Key).To(Equal("anywhere.eks.amazonaws.com/iptableslegacy"))
g.Expect(firstMatchExpression.Operator).To(Equal(corev1.NodeSelectorOpDoesNotExist))
})
workloadAPI.ShouldEventuallyNotExist(tt.ctx, tt.kubeProxyCP)
workloadAPI.ShouldEventuallyNotExist(tt.ctx, tt.kubeProxyWorker)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.nodeCP, func(g Gomega) {
g.Expect(tt.nodeCP.Labels).To(HaveKeyWithValue("anywhere.eks.amazonaws.com/iptableslegacy", "true"))
})
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.nodeWorker, func(g Gomega) {
g.Expect(tt.nodeWorker.Labels).To(HaveKeyWithValue("anywhere.eks.amazonaws.com/iptableslegacy", "true"))
})
legacyKubeProxy := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-iptables-legacy",
Namespace: "kube-system",
},
}
workloadAPI.ShouldEventuallyExist(tt.ctx, legacyKubeProxy)
workloadAPI.ShouldEventuallyMatch(tt.ctx, legacyKubeProxy, func(g Gomega) {
image := legacyKubeProxy.Spec.Template.Spec.Containers[0].Image
g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-16"))
firstMatchExpression := legacyKubeProxy.Spec.Template.Spec.Affinity.
NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0]
g.Expect(firstMatchExpression.Key).To(Equal("anywhere.eks.amazonaws.com/iptableslegacy"))
g.Expect(firstMatchExpression.Operator).To(Equal(corev1.NodeSelectorOpExists))
})
}
func TestKubeProxyUpgraderPrepareForUpgradeNoKCP(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kcp = &controlplanev1.KubeadmControlPlane{} // no kcp
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(MatchError(ContainSubstring("reading the kubeadm control plane for an upgrade")))
}
func TestKubeProxyUpgraderPrepareForUpgradeNoKubeProxy(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kubeProxy = &appsv1.DaemonSet{} // no kube-proxy
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(MatchError(ContainSubstring("reading kube-proxy for upgrade")))
}
func TestKubeProxyUpgraderPrepareForUpgradeInvalidEKSDTag(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kcp.Spec.Version = "1.23.16-eks-1-23"
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(MatchError(ContainSubstring("invalid eksd tag format")))
}
func TestKubeProxyUpgraderPrepareForUpgradeAlreadyUsingNewKubeProxy(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kcp.Spec.Version = "1.23.16-eks-1-23-18"
tt.kubeProxy.Spec.Template.Spec.Containers[0].Image = "public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-18"
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(Succeed())
managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient)
managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) {
g.Expect(tt.kcp.Annotations).NotTo(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true"))
})
workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) {
image := tt.kubeProxy.Spec.Template.Spec.Containers[0].Image
g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-18"))
g.Expect(tt.kubeProxy.Spec.Template.Spec.Affinity).To(BeNil())
})
workloadAPI.ShouldEventuallyExist(tt.ctx, tt.kubeProxyCP)
workloadAPI.ShouldEventuallyExist(tt.ctx, tt.kubeProxyWorker)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.nodeCP, func(g Gomega) {
g.Expect(tt.nodeCP.Labels).NotTo(HaveKeyWithValue("anywhere.eks.amazonaws.com/iptableslegacy", "true"))
})
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.nodeWorker, func(g Gomega) {
g.Expect(tt.nodeWorker.Labels).NotTo(HaveKeyWithValue("anywhere.eks.amazonaws.com/iptableslegacy", "true"))
})
legacyKubeProxy := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-iptables-legacy",
Namespace: "kube-system",
},
}
workloadAPI.ShouldEventuallyNotExist(tt.ctx, legacyKubeProxy)
}
func TestKubeProxyUpgraderPrepareForUpgradeNewSpecHasOldKubeProxy(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.spec.VersionsBundle.KubeDistro.EKSD.Channel = "1.23"
tt.spec.VersionsBundle.KubeDistro.EKSD.Number = 16
tt.spec.VersionsBundle.KubeDistro.KubeProxy.URI = "public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-16"
tt.kcp.Spec.Version = "1.23.16-eks-1-23-15"
tt.kubeProxy.Spec.Template.Spec.Containers[0].Image = "public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-15"
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.PrepareForUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(Succeed())
managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient)
managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) {
g.Expect(tt.kcp.Annotations).NotTo(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true"))
})
workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) {
image := tt.kubeProxy.Spec.Template.Spec.Containers[0].Image
g.Expect(image).To(Equal("public.ecr.aws/eks-distro/kubernetes/kube-proxy:v1.23.16-eks-1-23-15"))
g.Expect(tt.kubeProxy.Spec.Template.Spec.Affinity).To(BeNil())
})
workloadAPI.ShouldEventuallyExist(tt.ctx, tt.kubeProxyCP)
workloadAPI.ShouldEventuallyExist(tt.ctx, tt.kubeProxyWorker)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.nodeCP, func(g Gomega) {
g.Expect(tt.nodeCP.Labels).NotTo(HaveKeyWithValue("anywhere.eks.amazonaws.com/iptableslegacy", "true"))
})
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.nodeWorker, func(g Gomega) {
g.Expect(tt.nodeWorker.Labels).NotTo(HaveKeyWithValue("anywhere.eks.amazonaws.com/iptableslegacy", "true"))
})
legacyKubeProxy := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-iptables-legacy",
Namespace: "kube-system",
},
}
workloadAPI.ShouldEventuallyNotExist(tt.ctx, legacyKubeProxy)
}
func TestKubeProxyUpgraderCleanupAfterUpgradeSuccessWithReentry(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.workloadClusterExtraObjects = append(tt.workloadClusterExtraObjects, &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-iptables-legacy",
Namespace: "kube-system",
},
})
tt.kubeProxy.Spec.Template.Spec.Affinity = &corev1.Affinity{}
clientutil.AddAnnotation(tt.kcp, controlplanev1.SkipKubeProxyAnnotation, "true")
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.CleanupAfterUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(Succeed())
managementAPI := envtest.NewAPIExpecter(t, tt.managementImplClient)
managementAPI.ShouldEventuallyMatch(tt.ctx, tt.kcp, func(g Gomega) {
g.Expect(tt.kcp.Annotations).NotTo(HaveKeyWithValue(controlplanev1.SkipKubeProxyAnnotation, "true"))
})
workloadAPI := envtest.NewAPIExpecter(t, tt.workloadImplClient)
workloadAPI.ShouldEventuallyMatch(tt.ctx, tt.kubeProxy, func(g Gomega) {
g.Expect(tt.kubeProxy.Spec.Template.Spec.Affinity).To(BeNil())
})
legacyKubeProxy := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-iptables-legacy",
Namespace: "kube-system",
},
}
workloadAPI.ShouldEventuallyNotExist(tt.ctx, legacyKubeProxy)
g.Expect(
u.CleanupAfterUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(Succeed())
}
func TestKubeProxyCleanupAfterUpgradeNoKubeProxy(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kubeProxy = &appsv1.DaemonSet{} // no kube-proxy
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.CleanupAfterUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(MatchError(ContainSubstring("reading kube-proxy for upgrade")))
}
func TestKubeProxyCleanupAfterUpgradeNoKCP(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kcp = &controlplanev1.KubeadmControlPlane{} // no kcp
tt.initClients(t)
u := clustermanager.NewKubeProxyUpgrader()
g.Expect(
u.CleanupAfterUpgrade(tt.ctx, tt.log, tt.managementClient, tt.workloadClient, tt.spec),
).To(MatchError(ContainSubstring("reading the kubeadm control plane to cleanup the skip annotations")))
}
func TestEKSDVersionAndNumberFromTag(t *testing.T) {
tests := []struct {
name string
tag string
wantKubeVersion anywherev1.KubernetesVersion
wantNumber int
wantErr string
}{
{
name: "valid eks-d",
tag: "v1.23.16-eks-1-23-16",
wantKubeVersion: anywherev1.Kube123,
wantNumber: 16,
},
{
name: "invalid eks-d, no number",
tag: "v1.23.16-eks-1-23",
wantErr: "invalid eksd tag format",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
			kubeVersion, number, err := clustermanager.EKSDVersionAndNumberFromTag(tt.tag)
if tt.wantErr != "" {
g.Expect(err).To(MatchError(ContainSubstring(tt.wantErr)))
} else {
				g.Expect(kubeVersion).To(Equal(tt.wantKubeVersion))
g.Expect(number).To(Equal(tt.wantNumber))
}
})
}
}
func TestEKSDIncludesNewKubeProxy(t *testing.T) {
tests := []struct {
name string
kubeVersion anywherev1.KubernetesVersion
number int
want bool
}{
{
name: "eksd 1.22-23",
kubeVersion: anywherev1.Kube122,
number: 23,
want: true,
},
{
name: "eksd 1.22-22",
kubeVersion: anywherev1.Kube122,
number: 22,
want: true,
},
{
name: "eksd 1.22-16",
kubeVersion: anywherev1.Kube122,
number: 16,
want: false,
},
{
name: "eksd 1.23-18",
kubeVersion: anywherev1.Kube123,
number: 18,
want: true,
},
{
name: "eksd 1.23-17",
kubeVersion: anywherev1.Kube123,
number: 17,
want: true,
},
{
name: "eksd 1.23-16",
kubeVersion: anywherev1.Kube123,
number: 16,
want: false,
},
{
name: "eksd 1.24-13",
kubeVersion: anywherev1.Kube124,
number: 13,
want: true,
},
{
name: "eksd 1.24-12",
kubeVersion: anywherev1.Kube124,
number: 12,
want: true,
},
{
name: "eksd 1.24-11",
kubeVersion: anywherev1.Kube124,
number: 11,
want: false,
},
{
name: "eksd 1.25-9",
kubeVersion: anywherev1.Kube125,
number: 9,
want: true,
},
{
name: "eksd 1.25-8",
kubeVersion: anywherev1.Kube125,
number: 8,
want: true,
},
{
name: "eksd 1.25-7",
kubeVersion: anywherev1.Kube125,
number: 7,
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
g.Expect(clustermanager.EKSDIncludesNewKubeProxy(tt.kubeVersion, tt.number)).To(Equal(tt.want))
})
}
}
func TestKubeProxyCLIUpgraderPrepareUpgradeErrorManagementClusterClient(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
managementKubeConfig := "mngmt.yaml"
workloadKubeConfig := "workload.yaml"
ctrl := gomock.NewController(t)
factory := mocks.NewMockClientFactory(ctrl)
factory.EXPECT().BuildClientFromKubeconfig(managementKubeConfig).Return(nil, errors.New("building management client")).Times(2)
u := clustermanager.NewKubeProxyCLIUpgrader(
test.NewNullLogger(),
factory,
clustermanager.KubeProxyCLIUpgraderRetrier(*retrier.NewWithMaxRetries(2, 0)),
)
g.Expect(
u.PrepareUpgrade(tt.ctx, tt.spec, managementKubeConfig, workloadKubeConfig),
).To(MatchError(ContainSubstring("building management client")))
}
func TestKubeProxyCLIUpgraderPrepareUpgradeErrorWorkloadClusterClient(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.initClients(t)
managementKubeConfig := "mngmt.yaml"
workloadKubeConfig := "workload.yaml"
ctrl := gomock.NewController(t)
factory := mocks.NewMockClientFactory(ctrl)
factory.EXPECT().BuildClientFromKubeconfig(managementKubeConfig).Return(tt.managementClient, nil)
factory.EXPECT().BuildClientFromKubeconfig(workloadKubeConfig).Return(nil, errors.New("building workload client")).Times(2)
u := clustermanager.NewKubeProxyCLIUpgrader(
test.NewNullLogger(),
factory,
clustermanager.KubeProxyCLIUpgraderRetrier(*retrier.NewWithMaxRetries(2, 0)),
)
g.Expect(
u.PrepareUpgrade(tt.ctx, tt.spec, managementKubeConfig, workloadKubeConfig),
).To(MatchError(ContainSubstring("building workload client")))
}
func TestKubeProxyCLIUpgraderPrepareUpgradeSuccess(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.initClients(t)
managementKubeConfig := "mngmt.yaml"
workloadKubeConfig := "workload.yaml"
ctrl := gomock.NewController(t)
factory := mocks.NewMockClientFactory(ctrl)
factory.EXPECT().BuildClientFromKubeconfig(managementKubeConfig).Return(tt.managementClient, nil)
factory.EXPECT().BuildClientFromKubeconfig(workloadKubeConfig).Return(tt.workloadClient, nil)
u := clustermanager.NewKubeProxyCLIUpgrader(test.NewNullLogger(), factory)
g.Expect(
u.PrepareUpgrade(tt.ctx, tt.spec, managementKubeConfig, workloadKubeConfig),
).To(Succeed())
}
func TestKubeProxyCLIUpgraderPrepareUpgradeErrorInPrepare(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.kcp = &controlplanev1.KubeadmControlPlane{} // no kcp
tt.initClients(t)
managementKubeConfig := "mngmt.yaml"
workloadKubeConfig := "workload.yaml"
ctrl := gomock.NewController(t)
factory := mocks.NewMockClientFactory(ctrl)
factory.EXPECT().BuildClientFromKubeconfig(managementKubeConfig).Return(tt.managementClient, nil)
factory.EXPECT().BuildClientFromKubeconfig(workloadKubeConfig).Return(tt.workloadClient, nil)
u := clustermanager.NewKubeProxyCLIUpgrader(
test.NewNullLogger(),
factory,
clustermanager.KubeProxyCLIUpgraderRetrier(*retrier.NewWithMaxRetries(1, 0)),
)
g.Expect(
u.PrepareUpgrade(tt.ctx, tt.spec, managementKubeConfig, workloadKubeConfig),
).To(MatchError(ContainSubstring("reading the kubeadm control plane for an upgrade")))
}
func TestKubeProxyCLIUpgraderCleanupAfterUpgradeSuccess(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.initClients(t)
managementKubeConfig := "mngmt.yaml"
workloadKubeConfig := "workload.yaml"
ctrl := gomock.NewController(t)
factory := mocks.NewMockClientFactory(ctrl)
factory.EXPECT().BuildClientFromKubeconfig(managementKubeConfig).Return(tt.managementClient, nil)
factory.EXPECT().BuildClientFromKubeconfig(workloadKubeConfig).Return(tt.workloadClient, nil)
u := clustermanager.NewKubeProxyCLIUpgrader(test.NewNullLogger(), factory)
g.Expect(
u.CleanupAfterUpgrade(tt.ctx, tt.spec, managementKubeConfig, workloadKubeConfig),
).To(Succeed())
}
func TestKubeProxyCLICleanupAfterUpgradeErrorWorkloadClusterClient(t *testing.T) {
g := NewWithT(t)
tt := newPrepareKubeProxyTest()
tt.initClients(t)
managementKubeConfig := "mngmt.yaml"
workloadKubeConfig := "workload.yaml"
ctrl := gomock.NewController(t)
factory := mocks.NewMockClientFactory(ctrl)
factory.EXPECT().BuildClientFromKubeconfig(managementKubeConfig).Return(tt.managementClient, nil)
factory.EXPECT().BuildClientFromKubeconfig(workloadKubeConfig).Return(nil, errors.New("building workload client")).Times(2)
u := clustermanager.NewKubeProxyCLIUpgrader(
test.NewNullLogger(),
factory,
clustermanager.KubeProxyCLIUpgraderRetrier(*retrier.NewWithMaxRetries(2, 0)),
)
g.Expect(
u.CleanupAfterUpgrade(tt.ctx, tt.spec, managementKubeConfig, workloadKubeConfig),
).To(MatchError(ContainSubstring("building workload client")))
}
| 654 |
eks-anywhere | aws | Go | package clustermanager
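// Test-only exports: these aliases expose the package's internal helpers to
// external test packages (the common Go export_test pattern), so tests can
// call EKSDVersionAndNumberFromTag and EKSDIncludesNewKubeProxy directly.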
var (
EKSDVersionAndNumberFromTag = eksdVersionAndNumberFromTag
EKSDIncludesNewKubeProxy = eksdIncludesNewKubeProxy
)
| 7 |
eks-anywhere | aws | Go | package clustermanager
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/retrier"
"github.com/aws/eks-anywhere/pkg/types"
)
// RetrierClient wraps around a ClusterClient, offering retry functionality for some operations.
type RetrierClient struct {
*clusterManagerClient
retrier *retrier.Retrier
}
// NewRetrierClient constructs a new RetrierClient.
func NewRetrierClient(client ClusterClient, retrier *retrier.Retrier) *RetrierClient {
return &RetrierClient{
clusterManagerClient: newClient(client),
retrier: retrier,
}
}
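// A minimal usage sketch (added for illustration, not part of the original
// source). kubectlClient, ctx, mgmtCluster and manifest are hypothetical
// placeholders for a concrete ClusterClient, a context and test data; the
// retrier policy mirrors the ones used in the tests above.
//
//	r := retrier.NewWithMaxRetries(5, 0)
//	client := NewRetrierClient(kubectlClient, r)
//	if err := client.ApplyKubeSpecFromBytes(ctx, mgmtCluster, manifest); err != nil {
//		// every retry attempt failed
//	}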
// ApplyKubeSpecFromBytes creates/updates the objects defined in a YAML manifest against the API server using a client-side apply mechanism.
func (c *RetrierClient) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.ApplyKubeSpecFromBytes(ctx, cluster, data)
},
)
}
// Apply creates/updates an object against the api server following a client side apply mechanism.
func (c *RetrierClient) Apply(ctx context.Context, kubeconfigPath string, obj runtime.Object) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.Apply(ctx, kubeconfigPath, obj)
},
)
}
// PauseCAPICluster adds a `spec.Paused: true` to the CAPI cluster resource. This will cause all
// downstream CAPI + provider controllers to skip reconciling on the paused cluster's objects.
func (c *RetrierClient) PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.PauseCAPICluster(ctx, cluster, kubeconfig)
},
)
}
// ResumeCAPICluster removes the `spec.Paused` on the CAPI cluster resource. This will cause all
// downstream CAPI + provider controllers to resume reconciling on the paused cluster's objects.
func (c *RetrierClient) ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.ResumeCAPICluster(ctx, cluster, kubeconfig)
},
)
}
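// A hedged sketch of the intended pause/mutate/resume sequence (clusterName,
// kubeconfig, mgmtCluster and manifest are placeholders, not from the source):
//
//	if err := c.PauseCAPICluster(ctx, clusterName, kubeconfig); err != nil {
//		return err
//	}
//	// change CAPI objects while reconciliation is paused
//	if err := c.ApplyKubeSpecFromBytes(ctx, mgmtCluster, manifest); err != nil {
//		return err
//	}
//	return c.ResumeCAPICluster(ctx, clusterName, kubeconfig)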
// ApplyKubeSpecFromBytesForce creates/updates the objects defined in a YAML manifest against the API server using a client-side apply mechanism.
// It forces the operation: if API validation fails, the object is deleted and re-created.
func (c *RetrierClient) ApplyKubeSpecFromBytesForce(ctx context.Context, cluster *types.Cluster, data []byte) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.ApplyKubeSpecFromBytesForce(ctx, cluster, data)
},
)
}
// ApplyKubeSpecFromBytesWithNamespace creates/updates the objects defined in a YAML manifest against the API server using a client-side apply mechanism.
// It applies all objects in the given namespace.
func (c *RetrierClient) ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, data, namespace)
},
)
}
// UpdateAnnotationInNamespace adds/updates an annotation on the given Kubernetes resource.
func (c *RetrierClient) UpdateAnnotationInNamespace(ctx context.Context, resourceType, objectName string, annotations map[string]string, cluster *types.Cluster, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.UpdateAnnotationInNamespace(ctx, resourceType, objectName, annotations, cluster, namespace)
},
)
}
// RemoveAnnotationInNamespace deletes an annotation from the given Kubernetes resource if present.
func (c *RetrierClient) RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.RemoveAnnotationInNamespace(ctx, resourceType, objectName, key, cluster, namespace)
},
)
}
// ListObjects reads all objects of a particular resource type in a namespace.
func (c *RetrierClient) ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.ListObjects(ctx, resourceType, namespace, kubeconfig, list)
},
)
}
// DeleteGitOpsConfig deletes a GitOpsConfig object from the cluster.
func (c *RetrierClient) DeleteGitOpsConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.DeleteGitOpsConfig(ctx, cluster, name, namespace)
},
)
}
// DeleteEKSACluster deletes an EKSA Cluster object from the cluster.
func (c *RetrierClient) DeleteEKSACluster(ctx context.Context, cluster *types.Cluster, name string, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.DeleteEKSACluster(ctx, cluster, name, namespace)
},
)
}
// DeleteAWSIamConfig deletes an AWSIamConfig object from the cluster.
func (c *RetrierClient) DeleteAWSIamConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.DeleteAWSIamConfig(ctx, cluster, name, namespace)
},
)
}
// DeleteOIDCConfig deletes an OIDCConfig object from the cluster.
func (c *RetrierClient) DeleteOIDCConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.DeleteOIDCConfig(ctx, cluster, name, namespace)
},
)
}
// DeleteCluster deletes a CAPI Cluster from the cluster.
func (c *RetrierClient) DeleteCluster(ctx context.Context, cluster, clusterToDelete *types.Cluster) error {
return c.retrier.Retry(
func() error {
return c.ClusterClient.DeleteCluster(ctx, cluster, clusterToDelete)
},
)
}
| 156 |
eks-anywhere | aws | Go | package clustermanager
import (
"context"
"fmt"
"io"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/types"
)
// CreateClusterShim is a shim that implements the workload.Cluster interface. It leverages existing
// ClusterManager behavior to create a cluster for new workflows.
type CreateClusterShim struct {
spec *cluster.Spec
manager *ClusterManager
provider providers.Provider
}
// NewCreateClusterShim returns a new CreateClusterShim instance.
func NewCreateClusterShim(
spec *cluster.Spec,
manager *ClusterManager,
provider providers.Provider,
) *CreateClusterShim {
	return &CreateClusterShim{
		spec:     spec,
		manager:  manager,
		provider: provider,
	}
}
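// A usage sketch (an assumption for illustration, not taken from the original
// source): a workflow drives creation through the workload.Cluster interface,
// where ctx and mgmt are the caller's context and management cluster.
//
//	shim := NewCreateClusterShim(spec, manager, provider)
//	if err := shim.CreateAsync(ctx, mgmt); err != nil {
//		return err
//	}
//	return shim.WaitUntilControlPlaneAvailable(ctx, mgmt)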
// CreateAsync satisfies the workload.Cluster interface.
func (s CreateClusterShim) CreateAsync(ctx context.Context, management *types.Cluster) error {
if err := s.manager.applyProviderManifests(ctx, s.spec, management, s.provider); err != nil {
return fmt.Errorf("installing cluster creation manifests: %v", err)
}
if err := s.manager.InstallMachineHealthChecks(ctx, s.spec, management); err != nil {
return fmt.Errorf("installing machine health checks: %v", err)
}
return nil
}
// GetName satisfies the workload.Cluster interface.
func (s CreateClusterShim) GetName() string {
return s.spec.Cluster.Name
}
// WriteKubeconfig satisfies the workload.Cluster interface.
func (s CreateClusterShim) WriteKubeconfig(ctx context.Context, w io.Writer, management *types.Cluster) error {
return s.manager.getWorkloadClusterKubeconfig(ctx, s.spec.Cluster.Name, management, w)
}
// WaitUntilControlPlaneAvailable satisfies the workload.Cluster interface.
func (s CreateClusterShim) WaitUntilControlPlaneAvailable(ctx context.Context, management *types.Cluster) error {
return s.manager.waitUntilControlPlaneAvailable(ctx, s.spec, management)
}
// WaitUntilReady satisfies the workload.Cluster interface.
func (s CreateClusterShim) WaitUntilReady(ctx context.Context, management *types.Cluster) error {
return s.manager.waitForNodesReady(
ctx,
management,
s.spec.Cluster.Name,
[]string{clusterv1.MachineControlPlaneNameLabel, clusterv1.MachineDeploymentNameLabel},
types.WithNodeRef(),
)
}
| 73 |
eks-anywhere | aws | Go | package internal
import "github.com/aws/eks-anywhere/pkg/constants"
// CAPIDeployments maps each namespace to the CAPI deployment names expected in it.
var CAPIDeployments = map[string][]string{
"capi-kubeadm-bootstrap-system": {"capi-kubeadm-bootstrap-controller-manager"},
"capi-kubeadm-control-plane-system": {"capi-kubeadm-control-plane-controller-manager"},
"capi-system": {"capi-controller-manager"},
"cert-manager": {"cert-manager", "cert-manager-cainjector", "cert-manager-webhook"},
}
// ExternalEtcdDeployments maps each namespace to the etcdadm deployment names expected in it.
var ExternalEtcdDeployments = map[string][]string{
"etcdadm-controller-system": {"etcdadm-controller-controller-manager"},
"etcdadm-bootstrap-provider-system": {"etcdadm-bootstrap-provider-controller-manager"},
}
// EksaDeployments maps each namespace to the EKS Anywhere deployment names expected in it.
var EksaDeployments = map[string][]string{
constants.EksaSystemNamespace: {constants.EksaControllerManagerDeployment},
}
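// A sketch (added for illustration, not part of the original file) of how a
// caller can walk these maps, for example to wait on every deployment:
//
//	for namespace, deployments := range CAPIDeployments {
//		for _, deployment := range deployments {
//			// wait for or inspect namespace/deployment here
//			_ = namespace
//			_ = deployment
//		}
//	}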
| 21 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: github.com/aws/eks-anywhere/pkg/clustermanager (interfaces: ClusterClient,Networking,AwsIamAuth,EKSAComponents,KubernetesClient)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
v1alpha1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
cluster "github.com/aws/eks-anywhere/pkg/cluster"
executables "github.com/aws/eks-anywhere/pkg/executables"
filewriter "github.com/aws/eks-anywhere/pkg/filewriter"
providers "github.com/aws/eks-anywhere/pkg/providers"
types "github.com/aws/eks-anywhere/pkg/types"
v1alpha10 "github.com/aws/eks-anywhere/release/api/v1alpha1"
v1alpha11 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
logr "github.com/go-logr/logr"
gomock "github.com/golang/mock/gomock"
runtime "k8s.io/apimachinery/pkg/runtime"
v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
v1beta10 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
)
// MockClusterClient is a mock of ClusterClient interface.
type MockClusterClient struct {
ctrl *gomock.Controller
recorder *MockClusterClientMockRecorder
}
// MockClusterClientMockRecorder is the mock recorder for MockClusterClient.
type MockClusterClientMockRecorder struct {
mock *MockClusterClient
}
// NewMockClusterClient creates a new mock instance.
func NewMockClusterClient(ctrl *gomock.Controller) *MockClusterClient {
mock := &MockClusterClient{ctrl: ctrl}
mock.recorder = &MockClusterClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClusterClient) EXPECT() *MockClusterClientMockRecorder {
return m.recorder
}
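// Illustrative note (not produced by MockGen): a typical expectation in a
// test, where t, ctx, cluster and data are test-local values.
//
//	ctrl := gomock.NewController(t)
//	client := mocks.NewMockClusterClient(ctrl)
//	client.EXPECT().ApplyKubeSpecFromBytes(ctx, cluster, data).Return(nil)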
// Apply mocks base method.
func (m *MockClusterClient) Apply(arg0 context.Context, arg1 string, arg2 runtime.Object) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Apply", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Apply indicates an expected call of Apply.
func (mr *MockClusterClientMockRecorder) Apply(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockClusterClient)(nil).Apply), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytes mocks base method.
func (m *MockClusterClient) ApplyKubeSpecFromBytes(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockClusterClientMockRecorder) ApplyKubeSpecFromBytes(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockClusterClient)(nil).ApplyKubeSpecFromBytes), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytesForce mocks base method.
func (m *MockClusterClient) ApplyKubeSpecFromBytesForce(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesForce", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesForce indicates an expected call of ApplyKubeSpecFromBytesForce.
func (mr *MockClusterClientMockRecorder) ApplyKubeSpecFromBytesForce(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesForce", reflect.TypeOf((*MockClusterClient)(nil).ApplyKubeSpecFromBytesForce), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytesWithNamespace mocks base method.
func (m *MockClusterClient) ApplyKubeSpecFromBytesWithNamespace(arg0 context.Context, arg1 *types.Cluster, arg2 []byte, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesWithNamespace", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesWithNamespace indicates an expected call of ApplyKubeSpecFromBytesWithNamespace.
func (mr *MockClusterClientMockRecorder) ApplyKubeSpecFromBytesWithNamespace(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesWithNamespace", reflect.TypeOf((*MockClusterClient)(nil).ApplyKubeSpecFromBytesWithNamespace), arg0, arg1, arg2, arg3)
}
// BackupManagement mocks base method.
func (m *MockClusterClient) BackupManagement(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BackupManagement", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// BackupManagement indicates an expected call of BackupManagement.
func (mr *MockClusterClientMockRecorder) BackupManagement(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupManagement", reflect.TypeOf((*MockClusterClient)(nil).BackupManagement), arg0, arg1, arg2)
}
// CountMachineDeploymentReplicasReady mocks base method.
func (m *MockClusterClient) CountMachineDeploymentReplicasReady(arg0 context.Context, arg1, arg2 string) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CountMachineDeploymentReplicasReady", arg0, arg1, arg2)
ret0, _ := ret[0].(int)
ret1, _ := ret[1].(int)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// CountMachineDeploymentReplicasReady indicates an expected call of CountMachineDeploymentReplicasReady.
func (mr *MockClusterClientMockRecorder) CountMachineDeploymentReplicasReady(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountMachineDeploymentReplicasReady", reflect.TypeOf((*MockClusterClient)(nil).CountMachineDeploymentReplicasReady), arg0, arg1, arg2)
}
// CreateNamespaceIfNotPresent mocks base method.
func (m *MockClusterClient) CreateNamespaceIfNotPresent(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateNamespaceIfNotPresent", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateNamespaceIfNotPresent indicates an expected call of CreateNamespaceIfNotPresent.
func (mr *MockClusterClientMockRecorder) CreateNamespaceIfNotPresent(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespaceIfNotPresent", reflect.TypeOf((*MockClusterClient)(nil).CreateNamespaceIfNotPresent), arg0, arg1, arg2)
}
// DeleteAWSIamConfig mocks base method.
func (m *MockClusterClient) DeleteAWSIamConfig(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteAWSIamConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteAWSIamConfig indicates an expected call of DeleteAWSIamConfig.
func (mr *MockClusterClientMockRecorder) DeleteAWSIamConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAWSIamConfig", reflect.TypeOf((*MockClusterClient)(nil).DeleteAWSIamConfig), arg0, arg1, arg2, arg3)
}
// DeleteCluster mocks base method.
func (m *MockClusterClient) DeleteCluster(arg0 context.Context, arg1, arg2 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteCluster indicates an expected call of DeleteCluster.
func (mr *MockClusterClientMockRecorder) DeleteCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockClusterClient)(nil).DeleteCluster), arg0, arg1, arg2)
}
// DeleteEKSACluster mocks base method.
func (m *MockClusterClient) DeleteEKSACluster(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteEKSACluster", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteEKSACluster indicates an expected call of DeleteEKSACluster.
func (mr *MockClusterClientMockRecorder) DeleteEKSACluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEKSACluster", reflect.TypeOf((*MockClusterClient)(nil).DeleteEKSACluster), arg0, arg1, arg2, arg3)
}
// DeleteGitOpsConfig mocks base method.
func (m *MockClusterClient) DeleteGitOpsConfig(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteGitOpsConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteGitOpsConfig indicates an expected call of DeleteGitOpsConfig.
func (mr *MockClusterClientMockRecorder) DeleteGitOpsConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGitOpsConfig", reflect.TypeOf((*MockClusterClient)(nil).DeleteGitOpsConfig), arg0, arg1, arg2, arg3)
}
// DeleteOIDCConfig mocks base method.
func (m *MockClusterClient) DeleteOIDCConfig(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteOIDCConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteOIDCConfig indicates an expected call of DeleteOIDCConfig.
func (mr *MockClusterClientMockRecorder) DeleteOIDCConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOIDCConfig", reflect.TypeOf((*MockClusterClient)(nil).DeleteOIDCConfig), arg0, arg1, arg2, arg3)
}
// DeleteOldWorkerNodeGroup mocks base method.
func (m *MockClusterClient) DeleteOldWorkerNodeGroup(arg0 context.Context, arg1 *v1beta1.MachineDeployment, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteOldWorkerNodeGroup", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteOldWorkerNodeGroup indicates an expected call of DeleteOldWorkerNodeGroup.
func (mr *MockClusterClientMockRecorder) DeleteOldWorkerNodeGroup(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldWorkerNodeGroup", reflect.TypeOf((*MockClusterClient)(nil).DeleteOldWorkerNodeGroup), arg0, arg1, arg2)
}
// DeletePackageResources mocks base method.
func (m *MockClusterClient) DeletePackageResources(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeletePackageResources", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// DeletePackageResources indicates an expected call of DeletePackageResources.
func (mr *MockClusterClientMockRecorder) DeletePackageResources(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePackageResources", reflect.TypeOf((*MockClusterClient)(nil).DeletePackageResources), arg0, arg1, arg2)
}
// GetApiServerUrl mocks base method.
func (m *MockClusterClient) GetApiServerUrl(arg0 context.Context, arg1 *types.Cluster) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetApiServerUrl", arg0, arg1)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetApiServerUrl indicates an expected call of GetApiServerUrl.
func (mr *MockClusterClientMockRecorder) GetApiServerUrl(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetApiServerUrl", reflect.TypeOf((*MockClusterClient)(nil).GetApiServerUrl), arg0, arg1)
}
// GetBundles mocks base method.
func (m *MockClusterClient) GetBundles(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha10.Bundles, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBundles", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha10.Bundles)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetBundles indicates an expected call of GetBundles.
func (mr *MockClusterClientMockRecorder) GetBundles(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBundles", reflect.TypeOf((*MockClusterClient)(nil).GetBundles), arg0, arg1, arg2, arg3)
}
// GetClusters mocks base method.
func (m *MockClusterClient) GetClusters(arg0 context.Context, arg1 *types.Cluster) ([]types.CAPICluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetClusters", arg0, arg1)
ret0, _ := ret[0].([]types.CAPICluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetClusters indicates an expected call of GetClusters.
func (mr *MockClusterClientMockRecorder) GetClusters(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusters", reflect.TypeOf((*MockClusterClient)(nil).GetClusters), arg0, arg1)
}
// GetEksaAWSIamConfig mocks base method.
func (m *MockClusterClient) GetEksaAWSIamConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.AWSIamConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaAWSIamConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.AWSIamConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaAWSIamConfig indicates an expected call of GetEksaAWSIamConfig.
func (mr *MockClusterClientMockRecorder) GetEksaAWSIamConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaAWSIamConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaAWSIamConfig), arg0, arg1, arg2, arg3)
}
// GetEksaCloudStackMachineConfig mocks base method.
func (m *MockClusterClient) GetEksaCloudStackMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.CloudStackMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaCloudStackMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.CloudStackMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaCloudStackMachineConfig indicates an expected call of GetEksaCloudStackMachineConfig.
func (mr *MockClusterClientMockRecorder) GetEksaCloudStackMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCloudStackMachineConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaCloudStackMachineConfig), arg0, arg1, arg2, arg3)
}
// GetEksaCluster mocks base method.
func (m *MockClusterClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1alpha1.Cluster)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaCluster indicates an expected call of GetEksaCluster.
func (mr *MockClusterClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockClusterClient)(nil).GetEksaCluster), arg0, arg1, arg2)
}
// GetEksaFluxConfig mocks base method.
func (m *MockClusterClient) GetEksaFluxConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.FluxConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaFluxConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.FluxConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaFluxConfig indicates an expected call of GetEksaFluxConfig.
func (mr *MockClusterClientMockRecorder) GetEksaFluxConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaFluxConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaFluxConfig), arg0, arg1, arg2, arg3)
}
// GetEksaGitOpsConfig mocks base method.
func (m *MockClusterClient) GetEksaGitOpsConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.GitOpsConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaGitOpsConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.GitOpsConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaGitOpsConfig indicates an expected call of GetEksaGitOpsConfig.
func (mr *MockClusterClientMockRecorder) GetEksaGitOpsConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaGitOpsConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaGitOpsConfig), arg0, arg1, arg2, arg3)
}
// GetEksaOIDCConfig mocks base method.
func (m *MockClusterClient) GetEksaOIDCConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.OIDCConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaOIDCConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.OIDCConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaOIDCConfig indicates an expected call of GetEksaOIDCConfig.
func (mr *MockClusterClientMockRecorder) GetEksaOIDCConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaOIDCConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaOIDCConfig), arg0, arg1, arg2, arg3)
}
// GetEksaVSphereDatacenterConfig mocks base method.
func (m *MockClusterClient) GetEksaVSphereDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.VSphereDatacenterConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaVSphereDatacenterConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.VSphereDatacenterConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaVSphereDatacenterConfig indicates an expected call of GetEksaVSphereDatacenterConfig.
func (mr *MockClusterClientMockRecorder) GetEksaVSphereDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereDatacenterConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaVSphereDatacenterConfig), arg0, arg1, arg2, arg3)
}
// GetEksaVSphereMachineConfig mocks base method.
func (m *MockClusterClient) GetEksaVSphereMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.VSphereMachineConfig, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksaVSphereMachineConfig", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha1.VSphereMachineConfig)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksaVSphereMachineConfig indicates an expected call of GetEksaVSphereMachineConfig.
func (mr *MockClusterClientMockRecorder) GetEksaVSphereMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereMachineConfig", reflect.TypeOf((*MockClusterClient)(nil).GetEksaVSphereMachineConfig), arg0, arg1, arg2, arg3)
}
// GetEksdRelease mocks base method.
func (m *MockClusterClient) GetEksdRelease(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha11.Release, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetEksdRelease", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*v1alpha11.Release)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetEksdRelease indicates an expected call of GetEksdRelease.
func (mr *MockClusterClientMockRecorder) GetEksdRelease(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksdRelease", reflect.TypeOf((*MockClusterClient)(nil).GetEksdRelease), arg0, arg1, arg2, arg3)
}
// GetKubeadmControlPlane mocks base method.
func (m *MockClusterClient) GetKubeadmControlPlane(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta10.KubeadmControlPlane, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1, arg2}
for _, a := range arg3 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetKubeadmControlPlane", varargs...)
ret0, _ := ret[0].(*v1beta10.KubeadmControlPlane)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetKubeadmControlPlane indicates an expected call of GetKubeadmControlPlane.
func (mr *MockClusterClientMockRecorder) GetKubeadmControlPlane(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1, arg2}, arg3...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeadmControlPlane", reflect.TypeOf((*MockClusterClient)(nil).GetKubeadmControlPlane), varargs...)
}
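// Illustrative note (not produced by MockGen): the recorder flattens the
// variadic KubectlOpt arguments, so an expectation simply lists the arguments
// positionally. ctx, cluster and kcp are test-local values in this sketch.
//
//	client.EXPECT().GetKubeadmControlPlane(ctx, cluster, "my-cluster").Return(kcp, nil)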
// GetMachineDeployment mocks base method.
func (m *MockClusterClient) GetMachineDeployment(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1beta1.MachineDeployment, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetMachineDeployment", varargs...)
ret0, _ := ret[0].(*v1beta1.MachineDeployment)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMachineDeployment indicates an expected call of GetMachineDeployment.
func (mr *MockClusterClientMockRecorder) GetMachineDeployment(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeployment", reflect.TypeOf((*MockClusterClient)(nil).GetMachineDeployment), varargs...)
}
// GetMachineDeploymentsForCluster mocks base method.
func (m *MockClusterClient) GetMachineDeploymentsForCluster(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) ([]v1beta1.MachineDeployment, error) {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetMachineDeploymentsForCluster", varargs...)
ret0, _ := ret[0].([]v1beta1.MachineDeployment)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMachineDeploymentsForCluster indicates an expected call of GetMachineDeploymentsForCluster.
func (mr *MockClusterClientMockRecorder) GetMachineDeploymentsForCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeploymentsForCluster", reflect.TypeOf((*MockClusterClient)(nil).GetMachineDeploymentsForCluster), varargs...)
}
// GetMachines mocks base method.
func (m *MockClusterClient) GetMachines(arg0 context.Context, arg1 *types.Cluster, arg2 string) ([]types.Machine, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetMachines", arg0, arg1, arg2)
ret0, _ := ret[0].([]types.Machine)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetMachines indicates an expected call of GetMachines.
func (mr *MockClusterClientMockRecorder) GetMachines(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachines", reflect.TypeOf((*MockClusterClient)(nil).GetMachines), arg0, arg1, arg2)
}
// GetWorkloadKubeconfig mocks base method.
func (m *MockClusterClient) GetWorkloadKubeconfig(arg0 context.Context, arg1 string, arg2 *types.Cluster) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetWorkloadKubeconfig", arg0, arg1, arg2)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetWorkloadKubeconfig indicates an expected call of GetWorkloadKubeconfig.
func (mr *MockClusterClientMockRecorder) GetWorkloadKubeconfig(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkloadKubeconfig", reflect.TypeOf((*MockClusterClient)(nil).GetWorkloadKubeconfig), arg0, arg1, arg2)
}
// InitInfrastructure mocks base method.
func (m *MockClusterClient) InitInfrastructure(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster, arg3 providers.Provider) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InitInfrastructure", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// InitInfrastructure indicates an expected call of InitInfrastructure.
func (mr *MockClusterClientMockRecorder) InitInfrastructure(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitInfrastructure", reflect.TypeOf((*MockClusterClient)(nil).InitInfrastructure), arg0, arg1, arg2, arg3)
}
// KubeconfigSecretAvailable mocks base method.
func (m *MockClusterClient) KubeconfigSecretAvailable(arg0 context.Context, arg1, arg2, arg3 string) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "KubeconfigSecretAvailable", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// KubeconfigSecretAvailable indicates an expected call of KubeconfigSecretAvailable.
func (mr *MockClusterClientMockRecorder) KubeconfigSecretAvailable(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeconfigSecretAvailable", reflect.TypeOf((*MockClusterClient)(nil).KubeconfigSecretAvailable), arg0, arg1, arg2, arg3)
}
// ListObjects mocks base method.
func (m *MockClusterClient) ListObjects(arg0 context.Context, arg1, arg2, arg3 string, arg4 kubernetes.ObjectList) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListObjects", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// ListObjects indicates an expected call of ListObjects.
func (mr *MockClusterClientMockRecorder) ListObjects(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockClusterClient)(nil).ListObjects), arg0, arg1, arg2, arg3, arg4)
}
// MoveManagement mocks base method.
func (m *MockClusterClient) MoveManagement(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MoveManagement", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// MoveManagement indicates an expected call of MoveManagement.
func (mr *MockClusterClientMockRecorder) MoveManagement(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveManagement", reflect.TypeOf((*MockClusterClient)(nil).MoveManagement), arg0, arg1, arg2, arg3)
}
// PauseCAPICluster mocks base method.
func (m *MockClusterClient) PauseCAPICluster(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PauseCAPICluster", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PauseCAPICluster indicates an expected call of PauseCAPICluster.
func (mr *MockClusterClientMockRecorder) PauseCAPICluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseCAPICluster", reflect.TypeOf((*MockClusterClient)(nil).PauseCAPICluster), arg0, arg1, arg2)
}
// RemoveAnnotationInNamespace mocks base method.
func (m *MockClusterClient) RemoveAnnotationInNamespace(arg0 context.Context, arg1, arg2, arg3 string, arg4 *types.Cluster, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveAnnotationInNamespace", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// RemoveAnnotationInNamespace indicates an expected call of RemoveAnnotationInNamespace.
func (mr *MockClusterClientMockRecorder) RemoveAnnotationInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAnnotationInNamespace", reflect.TypeOf((*MockClusterClient)(nil).RemoveAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5)
}
// ResumeCAPICluster mocks base method.
func (m *MockClusterClient) ResumeCAPICluster(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResumeCAPICluster", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ResumeCAPICluster indicates an expected call of ResumeCAPICluster.
func (mr *MockClusterClientMockRecorder) ResumeCAPICluster(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeCAPICluster", reflect.TypeOf((*MockClusterClient)(nil).ResumeCAPICluster), arg0, arg1, arg2)
}
// SaveLog mocks base method.
func (m *MockClusterClient) SaveLog(arg0 context.Context, arg1 *types.Cluster, arg2 *types.Deployment, arg3 string, arg4 filewriter.FileWriter) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveLog", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(error)
return ret0
}
// SaveLog indicates an expected call of SaveLog.
func (mr *MockClusterClientMockRecorder) SaveLog(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLog", reflect.TypeOf((*MockClusterClient)(nil).SaveLog), arg0, arg1, arg2, arg3, arg4)
}
// SetEksaControllerEnvVar mocks base method.
func (m *MockClusterClient) SetEksaControllerEnvVar(arg0 context.Context, arg1, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetEksaControllerEnvVar", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// SetEksaControllerEnvVar indicates an expected call of SetEksaControllerEnvVar.
func (mr *MockClusterClientMockRecorder) SetEksaControllerEnvVar(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEksaControllerEnvVar", reflect.TypeOf((*MockClusterClient)(nil).SetEksaControllerEnvVar), arg0, arg1, arg2, arg3)
}
// UpdateAnnotationInNamespace mocks base method.
func (m *MockClusterClient) UpdateAnnotationInNamespace(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 *types.Cluster, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateAnnotationInNamespace", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateAnnotationInNamespace indicates an expected call of UpdateAnnotationInNamespace.
func (mr *MockClusterClientMockRecorder) UpdateAnnotationInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotationInNamespace", reflect.TypeOf((*MockClusterClient)(nil).UpdateAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5)
}
// UpdateEnvironmentVariablesInNamespace mocks base method.
func (m *MockClusterClient) UpdateEnvironmentVariablesInNamespace(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 *types.Cluster, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateEnvironmentVariablesInNamespace", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateEnvironmentVariablesInNamespace indicates an expected call of UpdateEnvironmentVariablesInNamespace.
func (mr *MockClusterClientMockRecorder) UpdateEnvironmentVariablesInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEnvironmentVariablesInNamespace", reflect.TypeOf((*MockClusterClient)(nil).UpdateEnvironmentVariablesInNamespace), arg0, arg1, arg2, arg3, arg4, arg5)
}
// ValidateControlPlaneNodes mocks base method.
func (m *MockClusterClient) ValidateControlPlaneNodes(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateControlPlaneNodes", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateControlPlaneNodes indicates an expected call of ValidateControlPlaneNodes.
func (mr *MockClusterClientMockRecorder) ValidateControlPlaneNodes(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneNodes", reflect.TypeOf((*MockClusterClient)(nil).ValidateControlPlaneNodes), arg0, arg1, arg2)
}
// ValidateWorkerNodes mocks base method.
func (m *MockClusterClient) ValidateWorkerNodes(arg0 context.Context, arg1, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ValidateWorkerNodes", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ValidateWorkerNodes indicates an expected call of ValidateWorkerNodes.
func (mr *MockClusterClientMockRecorder) ValidateWorkerNodes(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateWorkerNodes", reflect.TypeOf((*MockClusterClient)(nil).ValidateWorkerNodes), arg0, arg1, arg2)
}
// WaitForClusterReady mocks base method.
func (m *MockClusterClient) WaitForClusterReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForClusterReady", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForClusterReady indicates an expected call of WaitForClusterReady.
func (mr *MockClusterClientMockRecorder) WaitForClusterReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForClusterReady", reflect.TypeOf((*MockClusterClient)(nil).WaitForClusterReady), arg0, arg1, arg2, arg3)
}
// WaitForControlPlaneAvailable mocks base method.
func (m *MockClusterClient) WaitForControlPlaneAvailable(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForControlPlaneAvailable", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForControlPlaneAvailable indicates an expected call of WaitForControlPlaneAvailable.
func (mr *MockClusterClientMockRecorder) WaitForControlPlaneAvailable(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForControlPlaneAvailable", reflect.TypeOf((*MockClusterClient)(nil).WaitForControlPlaneAvailable), arg0, arg1, arg2, arg3)
}
// WaitForControlPlaneNotReady mocks base method.
func (m *MockClusterClient) WaitForControlPlaneNotReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForControlPlaneNotReady", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForControlPlaneNotReady indicates an expected call of WaitForControlPlaneNotReady.
func (mr *MockClusterClientMockRecorder) WaitForControlPlaneNotReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForControlPlaneNotReady", reflect.TypeOf((*MockClusterClient)(nil).WaitForControlPlaneNotReady), arg0, arg1, arg2, arg3)
}
// WaitForControlPlaneReady mocks base method.
func (m *MockClusterClient) WaitForControlPlaneReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForControlPlaneReady", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForControlPlaneReady indicates an expected call of WaitForControlPlaneReady.
func (mr *MockClusterClientMockRecorder) WaitForControlPlaneReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForControlPlaneReady", reflect.TypeOf((*MockClusterClient)(nil).WaitForControlPlaneReady), arg0, arg1, arg2, arg3)
}
// WaitForDeployment mocks base method.
func (m *MockClusterClient) WaitForDeployment(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForDeployment", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForDeployment indicates an expected call of WaitForDeployment.
func (mr *MockClusterClientMockRecorder) WaitForDeployment(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeployment", reflect.TypeOf((*MockClusterClient)(nil).WaitForDeployment), arg0, arg1, arg2, arg3, arg4, arg5)
}
// WaitForManagedExternalEtcdNotReady mocks base method.
func (m *MockClusterClient) WaitForManagedExternalEtcdNotReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForManagedExternalEtcdNotReady", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForManagedExternalEtcdNotReady indicates an expected call of WaitForManagedExternalEtcdNotReady.
func (mr *MockClusterClientMockRecorder) WaitForManagedExternalEtcdNotReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForManagedExternalEtcdNotReady", reflect.TypeOf((*MockClusterClient)(nil).WaitForManagedExternalEtcdNotReady), arg0, arg1, arg2, arg3)
}
// WaitForManagedExternalEtcdReady mocks base method.
func (m *MockClusterClient) WaitForManagedExternalEtcdReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForManagedExternalEtcdReady", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForManagedExternalEtcdReady indicates an expected call of WaitForManagedExternalEtcdReady.
func (mr *MockClusterClientMockRecorder) WaitForManagedExternalEtcdReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForManagedExternalEtcdReady", reflect.TypeOf((*MockClusterClient)(nil).WaitForManagedExternalEtcdReady), arg0, arg1, arg2, arg3)
}
// MockNetworking is a mock of Networking interface.
type MockNetworking struct {
ctrl *gomock.Controller
recorder *MockNetworkingMockRecorder
}
// MockNetworkingMockRecorder is the mock recorder for MockNetworking.
type MockNetworkingMockRecorder struct {
mock *MockNetworking
}
// NewMockNetworking creates a new mock instance.
func NewMockNetworking(ctrl *gomock.Controller) *MockNetworking {
mock := &MockNetworking{ctrl: ctrl}
mock.recorder = &MockNetworkingMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockNetworking) EXPECT() *MockNetworkingMockRecorder {
return m.recorder
}
// Install mocks base method.
func (m *MockNetworking) Install(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 []string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Install", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Install indicates an expected call of Install.
func (mr *MockNetworkingMockRecorder) Install(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockNetworking)(nil).Install), arg0, arg1, arg2, arg3)
}
// RunPostControlPlaneUpgradeSetup mocks base method.
func (m *MockNetworking) RunPostControlPlaneUpgradeSetup(arg0 context.Context, arg1 *types.Cluster) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RunPostControlPlaneUpgradeSetup", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// RunPostControlPlaneUpgradeSetup indicates an expected call of RunPostControlPlaneUpgradeSetup.
func (mr *MockNetworkingMockRecorder) RunPostControlPlaneUpgradeSetup(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPostControlPlaneUpgradeSetup", reflect.TypeOf((*MockNetworking)(nil).RunPostControlPlaneUpgradeSetup), arg0, arg1)
}
// Upgrade mocks base method.
func (m *MockNetworking) Upgrade(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 *cluster.Spec, arg4 []string) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockNetworkingMockRecorder) Upgrade(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockNetworking)(nil).Upgrade), arg0, arg1, arg2, arg3, arg4)
}
// MockAwsIamAuth is a mock of AwsIamAuth interface.
type MockAwsIamAuth struct {
ctrl *gomock.Controller
recorder *MockAwsIamAuthMockRecorder
}
// MockAwsIamAuthMockRecorder is the mock recorder for MockAwsIamAuth.
type MockAwsIamAuthMockRecorder struct {
mock *MockAwsIamAuth
}
// NewMockAwsIamAuth creates a new mock instance.
func NewMockAwsIamAuth(ctrl *gomock.Controller) *MockAwsIamAuth {
mock := &MockAwsIamAuth{ctrl: ctrl}
mock.recorder = &MockAwsIamAuthMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockAwsIamAuth) EXPECT() *MockAwsIamAuthMockRecorder {
return m.recorder
}
// CreateAndInstallAWSIAMAuthCASecret mocks base method.
func (m *MockAwsIamAuth) CreateAndInstallAWSIAMAuthCASecret(arg0 context.Context, arg1 *types.Cluster, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateAndInstallAWSIAMAuthCASecret", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// CreateAndInstallAWSIAMAuthCASecret indicates an expected call of CreateAndInstallAWSIAMAuthCASecret.
func (mr *MockAwsIamAuthMockRecorder) CreateAndInstallAWSIAMAuthCASecret(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAndInstallAWSIAMAuthCASecret", reflect.TypeOf((*MockAwsIamAuth)(nil).CreateAndInstallAWSIAMAuthCASecret), arg0, arg1, arg2)
}
// InstallAWSIAMAuth mocks base method.
func (m *MockAwsIamAuth) InstallAWSIAMAuth(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InstallAWSIAMAuth", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// InstallAWSIAMAuth indicates an expected call of InstallAWSIAMAuth.
func (mr *MockAwsIamAuthMockRecorder) InstallAWSIAMAuth(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallAWSIAMAuth", reflect.TypeOf((*MockAwsIamAuth)(nil).InstallAWSIAMAuth), arg0, arg1, arg2, arg3)
}
// UpgradeAWSIAMAuth mocks base method.
func (m *MockAwsIamAuth) UpgradeAWSIAMAuth(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpgradeAWSIAMAuth", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// UpgradeAWSIAMAuth indicates an expected call of UpgradeAWSIAMAuth.
func (mr *MockAwsIamAuthMockRecorder) UpgradeAWSIAMAuth(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeAWSIAMAuth", reflect.TypeOf((*MockAwsIamAuth)(nil).UpgradeAWSIAMAuth), arg0, arg1, arg2)
}
// MockEKSAComponents is a mock of EKSAComponents interface.
type MockEKSAComponents struct {
ctrl *gomock.Controller
recorder *MockEKSAComponentsMockRecorder
}
// MockEKSAComponentsMockRecorder is the mock recorder for MockEKSAComponents.
type MockEKSAComponentsMockRecorder struct {
mock *MockEKSAComponents
}
// NewMockEKSAComponents creates a new mock instance.
func NewMockEKSAComponents(ctrl *gomock.Controller) *MockEKSAComponents {
mock := &MockEKSAComponents{ctrl: ctrl}
mock.recorder = &MockEKSAComponentsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockEKSAComponents) EXPECT() *MockEKSAComponentsMockRecorder {
return m.recorder
}
// Install mocks base method.
func (m *MockEKSAComponents) Install(arg0 context.Context, arg1 logr.Logger, arg2 *types.Cluster, arg3 *cluster.Spec) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Install", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// Install indicates an expected call of Install.
func (mr *MockEKSAComponentsMockRecorder) Install(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockEKSAComponents)(nil).Install), arg0, arg1, arg2, arg3)
}
// Upgrade mocks base method.
func (m *MockEKSAComponents) Upgrade(arg0 context.Context, arg1 logr.Logger, arg2 *types.Cluster, arg3, arg4 *cluster.Spec) (*types.ChangeDiff, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upgrade", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*types.ChangeDiff)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Upgrade indicates an expected call of Upgrade.
func (mr *MockEKSAComponentsMockRecorder) Upgrade(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockEKSAComponents)(nil).Upgrade), arg0, arg1, arg2, arg3, arg4)
}
// MockKubernetesClient is a mock of KubernetesClient interface.
type MockKubernetesClient struct {
ctrl *gomock.Controller
recorder *MockKubernetesClientMockRecorder
}
// MockKubernetesClientMockRecorder is the mock recorder for MockKubernetesClient.
type MockKubernetesClientMockRecorder struct {
mock *MockKubernetesClient
}
// NewMockKubernetesClient creates a new mock instance.
func NewMockKubernetesClient(ctrl *gomock.Controller) *MockKubernetesClient {
mock := &MockKubernetesClient{ctrl: ctrl}
mock.recorder = &MockKubernetesClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockKubernetesClient) EXPECT() *MockKubernetesClientMockRecorder {
return m.recorder
}
// Apply mocks base method.
func (m *MockKubernetesClient) Apply(arg0 context.Context, arg1 string, arg2 runtime.Object) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Apply", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// Apply indicates an expected call of Apply.
func (mr *MockKubernetesClientMockRecorder) Apply(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockKubernetesClient)(nil).Apply), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytes mocks base method.
func (m *MockKubernetesClient) ApplyKubeSpecFromBytes(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytes", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytes indicates an expected call of ApplyKubeSpecFromBytes.
func (mr *MockKubernetesClientMockRecorder) ApplyKubeSpecFromBytes(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytes", reflect.TypeOf((*MockKubernetesClient)(nil).ApplyKubeSpecFromBytes), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytesForce mocks base method.
func (m *MockKubernetesClient) ApplyKubeSpecFromBytesForce(arg0 context.Context, arg1 *types.Cluster, arg2 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesForce", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesForce indicates an expected call of ApplyKubeSpecFromBytesForce.
func (mr *MockKubernetesClientMockRecorder) ApplyKubeSpecFromBytesForce(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesForce", reflect.TypeOf((*MockKubernetesClient)(nil).ApplyKubeSpecFromBytesForce), arg0, arg1, arg2)
}
// ApplyKubeSpecFromBytesWithNamespace mocks base method.
func (m *MockKubernetesClient) ApplyKubeSpecFromBytesWithNamespace(arg0 context.Context, arg1 *types.Cluster, arg2 []byte, arg3 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ApplyKubeSpecFromBytesWithNamespace", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ApplyKubeSpecFromBytesWithNamespace indicates an expected call of ApplyKubeSpecFromBytesWithNamespace.
func (mr *MockKubernetesClientMockRecorder) ApplyKubeSpecFromBytesWithNamespace(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesWithNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).ApplyKubeSpecFromBytesWithNamespace), arg0, arg1, arg2, arg3)
}
// RemoveAnnotationInNamespace mocks base method.
func (m *MockKubernetesClient) RemoveAnnotationInNamespace(arg0 context.Context, arg1, arg2, arg3 string, arg4 *types.Cluster, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveAnnotationInNamespace", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// RemoveAnnotationInNamespace indicates an expected call of RemoveAnnotationInNamespace.
func (mr *MockKubernetesClientMockRecorder) RemoveAnnotationInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAnnotationInNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).RemoveAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5)
}
// UpdateAnnotationInNamespace mocks base method.
func (m *MockKubernetesClient) UpdateAnnotationInNamespace(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 *types.Cluster, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateAnnotationInNamespace", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// UpdateAnnotationInNamespace indicates an expected call of UpdateAnnotationInNamespace.
func (mr *MockKubernetesClientMockRecorder) UpdateAnnotationInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotationInNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).UpdateAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5)
}
// WaitForDeployment mocks base method.
func (m *MockKubernetesClient) WaitForDeployment(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4, arg5 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WaitForDeployment", arg0, arg1, arg2, arg3, arg4, arg5)
ret0, _ := ret[0].(error)
return ret0
}
// WaitForDeployment indicates an expected call of WaitForDeployment.
func (mr *MockKubernetesClientMockRecorder) WaitForDeployment(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeployment", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForDeployment), arg0, arg1, arg2, arg3, arg4, arg5)
}
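The generated mocks above are consumed through the usual gomock workflow: build a controller, construct the mock, record expectations with EXPECT(), and hand the mock to the code under test in place of the real implementation. A minimal sketch follows, assuming a test file colocated with the generated mocks package; the expectation values and the direct call are illustrative only.

package mocks

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/cluster"
	"github.com/aws/eks-anywhere/pkg/types"
)

func TestMockNetworkingInstallSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	networking := NewMockNetworking(ctrl)

	// Record one expected Install call with any arguments, returning success.
	networking.EXPECT().
		Install(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
		Return(nil)

	// In a real test the mock would be injected into the component under test;
	// here it is called directly to show the recorded expectation being satisfied.
	if err := networking.Install(context.Background(), &types.Cluster{}, &cluster.Spec{}, nil); err != nil {
		t.Fatalf("Install() returned unexpected error: %v", err)
	}
}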
| 1,102 |
eks-anywhere | aws | Go | // Code generated by MockGen. DO NOT EDIT.
// Source: pkg/clustermanager/kube_proxy.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
kubernetes "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
gomock "github.com/golang/mock/gomock"
)
// MockClientFactory is a mock of ClientFactory interface.
type MockClientFactory struct {
ctrl *gomock.Controller
recorder *MockClientFactoryMockRecorder
}
// MockClientFactoryMockRecorder is the mock recorder for MockClientFactory.
type MockClientFactoryMockRecorder struct {
mock *MockClientFactory
}
// NewMockClientFactory creates a new mock instance.
func NewMockClientFactory(ctrl *gomock.Controller) *MockClientFactory {
mock := &MockClientFactory{ctrl: ctrl}
mock.recorder = &MockClientFactoryMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockClientFactory) EXPECT() *MockClientFactoryMockRecorder {
return m.recorder
}
// BuildClientFromKubeconfig mocks base method.
func (m *MockClientFactory) BuildClientFromKubeconfig(kubeconfigPath string) (kubernetes.Client, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildClientFromKubeconfig", kubeconfigPath)
ret0, _ := ret[0].(kubernetes.Client)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BuildClientFromKubeconfig indicates an expected call of BuildClientFromKubeconfig.
func (mr *MockClientFactoryMockRecorder) BuildClientFromKubeconfig(kubeconfigPath interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildClientFromKubeconfig", reflect.TypeOf((*MockClientFactory)(nil).BuildClientFromKubeconfig), kubeconfigPath)
}
| 51 |
eks-anywhere | aws | Go | package clustermarshaller
import (
"fmt"
"sigs.k8s.io/yaml"
"github.com/aws/eks-anywhere/internal/pkg/api"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/filewriter"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/templater"
)
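// MarshalClusterSpec builds a single multi-document YAML containing the cluster, datacenter,
// and machine configs, plus any associated GitOps/Flux, OIDC, AWS IAM, Tinkerbell template,
// and Snow IP pool objects.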
func MarshalClusterSpec(clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig) ([]byte, error) {
marshallables := make([]v1alpha1.Marshallable, 0, 5+len(machineConfigs)+len(clusterSpec.TinkerbellTemplateConfigs)+len(clusterSpec.SnowIPPools))
marshallables = append(marshallables,
clusterSpec.Cluster.ConvertConfigToConfigGenerateStruct(),
datacenterConfig.Marshallable(),
)
for _, machineConfig := range machineConfigs {
marshallables = append(marshallables, machineConfig.Marshallable())
}
	// If a GitOpsConfig is present, marshal it to the file; otherwise, marshal the FluxConfig.
	// This lets us keep using the FluxConfig internally while preserving the spec the user provided, since GitOpsConfig is being deprecated.
if clusterSpec.GitOpsConfig != nil {
marshallables = append(marshallables, clusterSpec.GitOpsConfig.ConvertConfigToConfigGenerateStruct())
}
if clusterSpec.FluxConfig != nil && clusterSpec.GitOpsConfig == nil {
marshallables = append(marshallables, clusterSpec.FluxConfig.ConvertConfigToConfigGenerateStruct())
}
if clusterSpec.OIDCConfig != nil {
marshallables = append(marshallables, clusterSpec.OIDCConfig.ConvertConfigToConfigGenerateStruct())
}
if clusterSpec.AWSIamConfig != nil {
marshallables = append(marshallables, clusterSpec.AWSIamConfig.ConvertConfigToConfigGenerateStruct())
}
if clusterSpec.TinkerbellTemplateConfigs != nil {
for _, t := range clusterSpec.TinkerbellTemplateConfigs {
marshallables = append(marshallables, t.ConvertConfigToConfigGenerateStruct())
}
}
if clusterSpec.SnowIPPools != nil {
for _, t := range clusterSpec.SnowIPPools {
marshallables = append(marshallables, t.ConvertConfigToConfigGenerateStruct())
}
}
resources := make([][]byte, 0, len(marshallables))
for _, marshallable := range marshallables {
resource, err := yaml.Marshal(marshallable)
if err != nil {
return nil, fmt.Errorf("failed marshalling resource for cluster spec: %v", err)
}
if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf == nil {
removeFromDefaultConfig := []string{"spec.clusterNetwork.dns"}
resource, err = api.CleanupPathsFromYaml(resource, removeFromDefaultConfig)
if err != nil {
return nil, fmt.Errorf("cleaning paths from yaml: %v", err)
}
}
resources = append(resources, resource)
}
return templater.AppendYamlResources(resources...), nil
}
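// WriteClusterConfig marshals the cluster spec and writes it to a <cluster-name>-eks-a-cluster.yaml
// file through the provided file writer.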
func WriteClusterConfig(clusterSpec *cluster.Spec, datacenterConfig providers.DatacenterConfig, machineConfigs []providers.MachineConfig, writer filewriter.FileWriter) error {
resourcesSpec, err := MarshalClusterSpec(clusterSpec, datacenterConfig, machineConfigs)
if err != nil {
return err
}
if filePath, err := writer.Write(fmt.Sprintf("%s-eks-a-cluster.yaml", clusterSpec.Cluster.ObjectMeta.Name), resourcesSpec, filewriter.PersistentFile); err != nil {
err = fmt.Errorf("writing eks-a cluster config file into %s: %v", filePath, err)
return err
}
return nil
}
| 84 |
eks-anywhere | aws | Go | package clustermarshaller_test
import (
"path/filepath"
"testing"
"time"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/aws/eks-anywhere/internal/test"
"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/clustermarshaller"
"github.com/aws/eks-anywhere/pkg/providers"
)
func TestWriteClusterConfigWithOIDCAndGitOps(t *testing.T) {
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.APIVersion = v1alpha1.GroupVersion.String()
s.Cluster.TypeMeta.Kind = v1alpha1.ClusterKind
s.Cluster.CreationTimestamp = v1.Time{Time: time.Now()}
s.Cluster.Name = "mycluster"
s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
s.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Kind: v1alpha1.GitOpsConfigKind,
Name: "config",
}
s.Cluster.Spec.IdentityProviderRefs = []v1alpha1.Ref{
{
Kind: v1alpha1.OIDCConfigKind,
Name: "config",
},
}
s.OIDCConfig = &v1alpha1.OIDCConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.OIDCConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.OIDCConfigSpec{
IssuerUrl: "https://url",
},
}
s.GitOpsConfig = &v1alpha1.GitOpsConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.GitOpsConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.GitOpsConfigSpec{
Flux: v1alpha1.Flux{
Github: v1alpha1.Github{
Owner: "me",
Branch: "main",
ClusterConfigPath: "clusters/mycluster",
FluxSystemNamespace: "flux-system",
},
},
},
}
s.Cluster.SetSelfManaged()
})
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereDatacenterKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Server: "https://url",
},
}
machineConfigs := []providers.MachineConfig{
&v1alpha1.VSphereMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "machineconf-1",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Folder: "my-folder",
},
},
&v1alpha1.VSphereMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "machineconf-2",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Folder: "my-folder",
},
},
}
g := NewWithT(t)
folder, writer := test.NewWriter(t)
gotFile := filepath.Join(folder, "mycluster-eks-a-cluster.yaml")
g.Expect(clustermarshaller.WriteClusterConfig(clusterSpec, datacenterConfig, machineConfigs, writer)).To(Succeed())
test.AssertFilesEquals(t, gotFile, "testdata/expected_marshalled_cluster.yaml")
}
func TestWriteClusterConfigWithFluxAndGitOpsConfigs(t *testing.T) {
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.APIVersion = v1alpha1.GroupVersion.String()
s.Cluster.TypeMeta.Kind = v1alpha1.ClusterKind
s.Cluster.CreationTimestamp = v1.Time{Time: time.Now()}
s.Cluster.Name = "mycluster"
s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
s.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Kind: v1alpha1.GitOpsConfigKind,
Name: "config",
}
s.FluxConfig = &v1alpha1.FluxConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.FluxConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Owner: "test",
Repository: "test",
},
},
}
s.GitOpsConfig = &v1alpha1.GitOpsConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.GitOpsConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.GitOpsConfigSpec{
Flux: v1alpha1.Flux{
Github: v1alpha1.Github{
Owner: "me",
Branch: "main",
ClusterConfigPath: "clusters/mycluster",
FluxSystemNamespace: "flux-system",
Repository: "test",
},
},
},
}
s.Cluster.SetSelfManaged()
})
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereDatacenterKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Server: "https://url",
},
}
machineConfigs := []providers.MachineConfig{
&v1alpha1.VSphereMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "machineconf-1",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Folder: "my-folder",
},
},
&v1alpha1.VSphereMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "machineconf-2",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Folder: "my-folder",
},
},
}
g := NewWithT(t)
folder, writer := test.NewWriter(t)
gotFile := filepath.Join(folder, "mycluster-eks-a-cluster.yaml")
g.Expect(clustermarshaller.WriteClusterConfig(clusterSpec, datacenterConfig, machineConfigs, writer)).To(Succeed())
test.AssertFilesEquals(t, gotFile, "testdata/expected_marshalled_cluster_flux_and_gitops.yaml")
}
func TestWriteClusterConfigWithFluxConfig(t *testing.T) {
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.APIVersion = v1alpha1.GroupVersion.String()
s.Cluster.TypeMeta.Kind = v1alpha1.ClusterKind
s.Cluster.CreationTimestamp = v1.Time{Time: time.Now()}
s.Cluster.Name = "mycluster"
s.Cluster.Spec.ExternalEtcdConfiguration = &v1alpha1.ExternalEtcdConfiguration{
Count: 3,
}
s.Cluster.Spec.GitOpsRef = &v1alpha1.Ref{
Kind: v1alpha1.FluxConfigKind,
Name: "config",
}
s.FluxConfig = &v1alpha1.FluxConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.FluxConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.FluxConfigSpec{
Github: &v1alpha1.GithubProviderConfig{
Owner: "test",
Repository: "test",
},
},
}
s.Cluster.SetSelfManaged()
})
datacenterConfig := &v1alpha1.VSphereDatacenterConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereDatacenterKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "config",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereDatacenterConfigSpec{
Server: "https://url",
},
}
machineConfigs := []providers.MachineConfig{
&v1alpha1.VSphereMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "machineconf-1",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Folder: "my-folder",
},
},
&v1alpha1.VSphereMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.VSphereMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "machineconf-2",
CreationTimestamp: v1.Time{Time: time.Now()},
},
Spec: v1alpha1.VSphereMachineConfigSpec{
Folder: "my-folder",
},
},
}
g := NewWithT(t)
folder, writer := test.NewWriter(t)
gotFile := filepath.Join(folder, "mycluster-eks-a-cluster.yaml")
g.Expect(clustermarshaller.WriteClusterConfig(clusterSpec, datacenterConfig, machineConfigs, writer)).To(Succeed())
test.AssertFilesEquals(t, gotFile, "testdata/expected_marshalled_cluster_flux_config.yaml")
}
func TestWriteClusterConfigSnow(t *testing.T) {
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster = &v1alpha1.Cluster{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.ClusterKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "testcluster",
},
Spec: v1alpha1.ClusterSpec{
DatacenterRef: v1alpha1.Ref{
Kind: v1alpha1.SnowDatacenterKind,
Name: "testsnow",
},
ControlPlaneConfiguration: v1alpha1.ControlPlaneConfiguration{
MachineGroupRef: &v1alpha1.Ref{
Kind: v1alpha1.SnowMachineConfigKind,
Name: "testsnow",
},
},
},
}
s.SnowIPPools = map[string]*v1alpha1.SnowIPPool{
"ippool": {
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.SnowIPPoolKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "ippool",
},
Spec: v1alpha1.SnowIPPoolSpec{
Pools: []v1alpha1.IPPool{
{
IPStart: "start",
IPEnd: "end",
Gateway: "gateway",
Subnet: "subnet",
},
},
},
},
}
s.Cluster.SetSelfManaged()
})
datacenterConfig := &v1alpha1.SnowDatacenterConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.SnowDatacenterKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "testsnow",
},
}
machineConfigs := []providers.MachineConfig{
&v1alpha1.SnowMachineConfig{
TypeMeta: v1.TypeMeta{
Kind: v1alpha1.SnowMachineConfigKind,
APIVersion: v1alpha1.GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "testsnow",
},
Spec: v1alpha1.SnowMachineConfigSpec{
Network: v1alpha1.SnowNetwork{
DirectNetworkInterfaces: []v1alpha1.SnowDirectNetworkInterface{
{
Index: 1,
IPPoolRef: &v1alpha1.Ref{
Kind: v1alpha1.SnowIPPoolKind,
Name: "ippool",
},
Primary: true,
},
},
},
},
},
}
g := NewWithT(t)
folder, writer := test.NewWriter(t)
g.Expect(clustermarshaller.WriteClusterConfig(clusterSpec, datacenterConfig, machineConfigs, writer)).To(Succeed())
test.AssertFilesEquals(t, filepath.Join(folder, "testcluster-eks-a-cluster.yaml"), "testdata/expected_marshalled_snow.yaml")
}
| 405 |
eks-anywhere | aws | Go | package collection
// Set is a collection that only contains unique elements.
type Set[T comparable] map[T]struct{}
// NewSet creates an empty Set.
func NewSet[T comparable]() Set[T] {
return newSet[T](0)
}
// NewSetFrom creates a Set from a list of elements.
func NewSetFrom[T comparable](elements ...T) Set[T] {
s := NewSet[T]()
for _, e := range elements {
s.Add(e)
}
return s
}
func newSet[T comparable](size int) Set[T] {
return make(Set[T], size)
}
// Add stores a new element in the Set if it isn't already present.
func (s Set[T]) Add(e T) {
s[e] = struct{}{}
}
// Delete removes an element from the Set if it is present.
func (s Set[T]) Delete(e T) {
delete(s, e)
}
// Contains checks if an element is contained in the Set.
func (s Set[T]) Contains(e T) bool {
_, present := s[e]
return present
}
// ToSlice generates a new slice with all elements in the Set.
// Order is non-deterministic.
func (s Set[T]) ToSlice() []T {
keys := make([]T, 0, len(s))
for k := range s {
keys = append(keys, k)
}
return keys
}
// MapSet maps a collection to a Set, using a closure to extract
// the values of type T from each element.
func MapSet[G any, T comparable](c []G, f func(G) T) Set[T] {
s := NewSet[T]()
for _, element := range c {
s.Add(f(element))
}
return s
}
| 61 |
eks-anywhere | aws | Go | package collection_test
import (
"testing"
. "github.com/onsi/gomega"
"github.com/aws/eks-anywhere/pkg/collection"
)
func TestSetContains(t *testing.T) {
testCases := []struct {
testName string
set collection.Set[string]
element string
want bool
}{
{
testName: "empty set",
set: collection.NewSet[string](),
element: "a",
want: false,
},
{
testName: "contained in non empty set",
set: collection.NewSetFrom("b", "a", "c"),
element: "a",
want: true,
},
{
testName: "not contained in non empty set",
set: collection.NewSetFrom("b", "a", "c"),
element: "d",
want: false,
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.set.Contains(tt.element)).To(Equal(tt.want))
})
}
}
func TestSetDelete(t *testing.T) {
g := NewWithT(t)
s := collection.NewSetFrom("b", "a", "c")
g.Expect(s.Contains("c")).To(BeTrue())
s.Delete("c")
g.Expect(s.Contains("c")).To(BeFalse())
g.Expect(s.ToSlice()).To(ConsistOf("a", "b"))
}
func TestSetToSlice(t *testing.T) {
testCases := []struct {
testName string
set collection.Set[string]
want []string
}{
{
testName: "empty set",
set: collection.NewSet[string](),
want: []string{},
},
{
testName: "non empty set",
set: collection.NewSetFrom("b", "a", "c", "d", "a", "b"),
want: []string{
"a", "b", "c", "d",
},
},
}
for _, tt := range testCases {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
g.Expect(tt.set.ToSlice()).To(ConsistOf(tt.want))
})
}
}
func TestMapSet(t *testing.T) {
g := NewWithT(t)
elements := []myStruct{
{
name: "a",
},
{
name: "b",
},
{
name: "b",
},
}
s := collection.MapSet(elements, func(e myStruct) string {
return e.name
})
g.Expect(s.ToSlice()).To(ConsistOf("a", "b"))
}
type myStruct struct {
name string
}
| 106 |
eks-anywhere | aws | Go | package config
const (
EksaGitPassphraseTokenEnv = "EKSA_GIT_SSH_KEY_PASSPHRASE"
EksaGitPrivateKeyTokenEnv = "EKSA_GIT_PRIVATE_KEY"
EksaGitKnownHostsFileEnv = "EKSA_GIT_KNOWN_HOSTS"
SshKnownHostsEnv = "SSH_KNOWN_HOSTS"
EksaAccessKeyIdEnv = "EKSA_AWS_ACCESS_KEY_ID"
EksaSecretAccessKeyEnv = "EKSA_AWS_SECRET_ACCESS_KEY"
AwsAccessKeyIdEnv = "AWS_ACCESS_KEY_ID"
AwsSecretAccessKeyEnv = "AWS_SECRET_ACCESS_KEY"
EksaRegionEnv = "EKSA_AWS_REGION"
)
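// CliConfig contains the Git SSH configuration (key passphrase, private key, and known-hosts file) used by the CLI.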
type CliConfig struct {
GitSshKeyPassphrase string
GitPrivateKeyFile string
GitKnownHostsFile string
}
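The constants above only name environment variables; nothing in this file reads them into the struct. A small, hypothetical helper (loadCliConfigFromEnv is not part of the source) sketches how the Git-related values could be loaded into a CliConfig:

package config

import "os"

// loadCliConfigFromEnv is a hypothetical helper that copies the Git-related
// environment variables defined above into a CliConfig. How each value is
// interpreted (literal key vs. file path) is up to the caller.
func loadCliConfigFromEnv() CliConfig {
	return CliConfig{
		GitSshKeyPassphrase: os.Getenv(EksaGitPassphraseTokenEnv),
		GitPrivateKeyFile:   os.Getenv(EksaGitPrivateKeyTokenEnv),
		GitKnownHostsFile:   os.Getenv(EksaGitKnownHostsFileEnv),
	}
}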
| 20 |
eks-anywhere | aws | Go | package config
import (
"os"
)
const (
HttpsProxyKey = "HTTPS_PROXY"
HttpProxyKey = "HTTP_PROXY"
NoProxyKey = "NO_PROXY"
)
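// GetProxyConfigFromEnv returns the HTTPS_PROXY, HTTP_PROXY, and NO_PROXY values read from the environment.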
func GetProxyConfigFromEnv() map[string]string {
return map[string]string{
HttpsProxyKey: os.Getenv(HttpsProxyKey),
HttpProxyKey: os.Getenv(HttpProxyKey),
NoProxyKey: os.Getenv(NoProxyKey),
}
}
| 20 |
eks-anywhere | aws | Go | package config_test
import (
"testing"
"github.com/aws/eks-anywhere/pkg/config"
)
func TestGetProxyConfigFromEnv(t *testing.T) {
wantHttpsProxy := "FOO"
wantHttpProxy := "BAR"
wantNoProxy := "localhost,anotherhost"
wantEnv := map[string]string{
config.HttpsProxyKey: wantHttpsProxy,
config.HttpProxyKey: wantHttpProxy,
config.NoProxyKey: wantNoProxy,
}
for k, v := range wantEnv {
t.Setenv(k, v)
}
env := config.GetProxyConfigFromEnv()
for k, target := range wantEnv {
if val := env[k]; val != target {
t.Fatalf("config.GetProxyConfigFromEnv %s = %s, want %s", k, val, target)
}
}
}
| 29 |
eks-anywhere | aws | Go | package config
import (
"context"
"fmt"
"os"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/aws/eks-anywhere/pkg/constants"
)
const registryAuthSecretName = "registry-credentials"
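// ReadCredentials reads the registry username and password from the environment,
// returning an error if either variable is unset.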
func ReadCredentials() (username, password string, err error) {
username, ok := os.LookupEnv(constants.RegistryUsername)
if !ok {
return "", "", errors.New("please set REGISTRY_USERNAME env var")
}
password, ok = os.LookupEnv(constants.RegistryPassword)
if !ok {
return "", "", errors.New("please set REGISTRY_PASSWORD env var")
}
return username, password, nil
}
// ReadCredentialsFromSecret reads the registry credentials from the Kubernetes secret registry-credentials.
// It returns the username and password, or an error if the secret cannot be fetched.
func ReadCredentialsFromSecret(ctx context.Context, client client.Client) (username, password string, err error) {
registryAuthSecret := &corev1.Secret{}
key := types.NamespacedName{Name: registryAuthSecretName, Namespace: constants.EksaSystemNamespace}
if err := client.Get(ctx, key, registryAuthSecret); err != nil {
return "", "", errors.Wrap(err, "fetching registry auth secret")
}
rUsername := registryAuthSecret.Data["username"]
rPassword := registryAuthSecret.Data["password"]
return string(rUsername), string(rPassword), nil
}
// SetCredentialsEnv sets the registry username and password env variables.
func SetCredentialsEnv(username, password string) error {
if err := os.Setenv(constants.RegistryUsername, username); err != nil {
return fmt.Errorf("failed setting env %s: %v", constants.RegistryUsername, err)
}
if err := os.Setenv(constants.RegistryPassword, password); err != nil {
return fmt.Errorf("failed setting env %s: %v", constants.RegistryPassword, err)
}
return nil
}
| 59 |
eks-anywhere | aws | Go | package config
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/aws/eks-anywhere/pkg/constants"
)
func TestReadConfig(t *testing.T) {
os.Unsetenv(constants.RegistryUsername)
os.Unsetenv(constants.RegistryPassword)
_, _, err := ReadCredentials()
assert.Error(t, err)
expectedUser := "testuser"
expectedPassword := "testpass"
t.Setenv(constants.RegistryUsername, expectedUser)
t.Setenv(constants.RegistryPassword, expectedPassword)
username, password, err := ReadCredentials()
assert.NoError(t, err)
assert.Equal(t, expectedUser, username)
assert.Equal(t, expectedPassword, password)
}
func TestSetCredentialsEnv(t *testing.T) {
uName := ""
uPass := ""
err := SetCredentialsEnv(uName, uPass)
assert.NoError(t, err)
}
func TestReadCredentialsFromSecret(t *testing.T) {
ctx := context.Background()
expectedUser := "testuser"
expectedPassword := "testpass"
sec := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: registryAuthSecretName,
Namespace: constants.EksaSystemNamespace,
},
Data: map[string][]byte{
"username": []byte(expectedUser),
"password": []byte(expectedPassword),
},
}
objs := []runtime.Object{sec}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()
u, p, err := ReadCredentialsFromSecret(ctx, cl)
assert.NoError(t, err)
assert.Equal(t, expectedUser, u)
assert.Equal(t, expectedPassword, p)
}
func TestReadCredentialsFromSecretNotFound(t *testing.T) {
ctx := context.Background()
cb := fake.NewClientBuilder()
cl := cb.Build()
u, p, err := ReadCredentialsFromSecret(ctx, cl)
assert.ErrorContains(t, err, "fetching registry auth secret")
assert.Empty(t, u)
assert.Empty(t, p)
}
| 74 |
eks-anywhere | aws | Go | package config
import (
_ "embed"
"os"
)
const (
EksavSphereUsernameKey = "EKSA_VSPHERE_USERNAME"
EksavSpherePasswordKey = "EKSA_VSPHERE_PASSWORD"
	// EksavSphereCPUsernameKey holds the username for the cloud provider.
	EksavSphereCPUsernameKey = "EKSA_VSPHERE_CP_USERNAME"
	// EksavSphereCPPasswordKey holds the password for the cloud provider.
EksavSphereCPPasswordKey = "EKSA_VSPHERE_CP_PASSWORD"
)
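// VSphereUserConfig holds the vSphere credentials and the (optionally separate) cloud provider credentials.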
type VSphereUserConfig struct {
EksaVsphereUsername string
EksaVspherePassword string
EksaVsphereCPUsername string
EksaVsphereCPPassword string
}
//go:embed static/globalPrivs.json
var VSphereGlobalPrivsFile string
//go:embed static/eksUserPrivs.json
var VSphereUserPrivsFile string
//go:embed static/adminPrivs.json
var VSphereAdminPrivsFile string
//go:embed static/readOnlyPrivs.json
var VSphereReadOnlyPrivs string
func NewVsphereUserConfig() *VSphereUserConfig {
eksaVsphereUsername := os.Getenv(EksavSphereUsernameKey)
eksaVspherePassword := os.Getenv(EksavSpherePasswordKey)
// Cloud provider credentials
eksaCPUsername := os.Getenv(EksavSphereCPUsernameKey)
eksaCPPassword := os.Getenv(EksavSphereCPPasswordKey)
if eksaCPUsername == "" {
eksaCPUsername = eksaVsphereUsername
eksaCPPassword = eksaVspherePassword
}
vuc := VSphereUserConfig{
eksaVsphereUsername,
eksaVspherePassword,
eksaCPUsername,
eksaCPPassword,
}
return &vuc
}
| 58 |
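A short usage sketch for NewVsphereUserConfig: when the EKSA_VSPHERE_CP_* variables are unset, the cloud-provider credentials fall back to the primary vSphere credentials. The example program and credential values below are illustrative only.

package main

import (
	"fmt"
	"os"

	"github.com/aws/eks-anywhere/pkg/config"
)

func main() {
	// Illustrative credentials; in practice these are exported by the operator.
	os.Setenv(config.EksavSphereUsernameKey, "administrator@vsphere.local")
	os.Setenv(config.EksavSpherePasswordKey, "example-password")

	vuc := config.NewVsphereUserConfig()

	// With no EKSA_VSPHERE_CP_USERNAME set, the cloud-provider username
	// falls back to the primary vSphere username.
	fmt.Println(vuc.EksaVsphereCPUsername) // administrator@vsphere.local
}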